From c1d394e26431ba9d0567d5204495dbab2da1c9c9 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Sat, 23 Aug 2025 21:27:04 +0900 Subject: [PATCH 1/2] AMDGPU: Fix not diagnosing unaligned VGPRs for vsrc operands The verifier was not checking the alignment requirement for 64-bit operands that accept inline immediates. Not all custom operand types were handled in the switch, so round it out with explicit handling of all enum values, and make the default case fall back to the generic checks for unhandled cases. Fixes #155095 --- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 37 ++++++++++++++++--- llvm/test/CodeGen/AMDGPU/remat-vop.mir | 12 +++--- ...rted-unaligned-vgpr-check-vsrc-operand.mir | 34 +++++++++++++++++ 3 files changed, 72 insertions(+), 11 deletions(-) create mode 100644 llvm/test/MachineVerifier/AMDGPU/unsupported-unaligned-vgpr-check-vsrc-operand.mir diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 69708c47f6c9c..ba37bdb203a7f 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -4933,7 +4933,8 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, int RegClass = Desc.operands()[i].RegClass; - switch (Desc.operands()[i].OperandType) { + const MCOperandInfo &OpInfo = Desc.operands()[i]; + switch (OpInfo.OperandType) { case MCOI::OPERAND_REGISTER: if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) { ErrInfo = "Illegal immediate value for operand."; @@ -4941,15 +4942,31 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, } break; case AMDGPU::OPERAND_REG_IMM_INT32: + case AMDGPU::OPERAND_REG_IMM_INT64: + case AMDGPU::OPERAND_REG_IMM_INT16: case AMDGPU::OPERAND_REG_IMM_FP32: case AMDGPU::OPERAND_REG_IMM_V2FP32: + case AMDGPU::OPERAND_REG_IMM_BF16: + case AMDGPU::OPERAND_REG_IMM_FP16: + case AMDGPU::OPERAND_REG_IMM_FP64: + case AMDGPU::OPERAND_REG_IMM_V2FP16: + case AMDGPU::OPERAND_REG_IMM_V2INT16: + case AMDGPU::OPERAND_REG_IMM_V2INT32: + case AMDGPU::OPERAND_REG_IMM_V2BF16: break; + case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16: + break; + case AMDGPU::OPERAND_REG_INLINE_C_INT16: case AMDGPU::OPERAND_REG_INLINE_C_INT32: - case AMDGPU::OPERAND_REG_INLINE_C_FP32: case AMDGPU::OPERAND_REG_INLINE_C_INT64: + case AMDGPU::OPERAND_REG_INLINE_C_FP32: case AMDGPU::OPERAND_REG_INLINE_C_FP64: - case AMDGPU::OPERAND_REG_INLINE_C_INT16: + case AMDGPU::OPERAND_REG_INLINE_C_BF16: case AMDGPU::OPERAND_REG_INLINE_C_FP16: + case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: + case AMDGPU::OPERAND_REG_INLINE_C_V2BF16: + case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: case AMDGPU::OPERAND_REG_INLINE_AC_INT32: case AMDGPU::OPERAND_REG_INLINE_AC_FP32: case AMDGPU::OPERAND_REG_INLINE_AC_FP64: { @@ -4965,6 +4982,10 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, return false; } break; + case AMDGPU::OPERAND_INPUT_MODS: + case AMDGPU::OPERAND_SDWA_VOPC_DST: + case AMDGPU::OPERAND_KIMM16: + break; case MCOI::OPERAND_IMMEDIATE: case AMDGPU::OPERAND_KIMM32: case AMDGPU::OPERAND_KIMM64: @@ -4976,9 +4997,15 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, ErrInfo = "Expected immediate, but got non-immediate"; return false; } - [[fallthrough]]; + break; + case MCOI::OPERAND_UNKNOWN: + case MCOI::OPERAND_MEMORY: + case MCOI::OPERAND_PCREL: + break; default: - continue; + if (OpInfo.isGenericType()) + continue; + break; } if (!MO.isReg()) diff --git a/llvm/test/CodeGen/AMDGPU/remat-vop.mir b/llvm/test/CodeGen/AMDGPU/remat-vop.mir index 4f6ea44ccf68b..23cf6f005811e 100644 ---
a/llvm/test/CodeGen/AMDGPU/remat-vop.mir +++ b/llvm/test/CodeGen/AMDGPU/remat-vop.mir @@ -278,16 +278,16 @@ machineFunctionInfo: body: | bb.0: ; GCN-LABEL: name: test_remat_v_cvt_i32_f64_e64_undef - ; GCN: [[V_CVT_I32_F64_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %1:vreg_64, 0, 0, implicit $exec, implicit $mode - ; GCN-NEXT: [[V_CVT_I32_F64_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %1:vreg_64, 0, 0, implicit $exec, implicit $mode - ; GCN-NEXT: [[V_CVT_I32_F64_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %1:vreg_64, 0, 0, implicit $exec, implicit $mode + ; GCN: [[V_CVT_I32_F64_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %1:vreg_64_align2, 0, 0, implicit $exec, implicit $mode + ; GCN-NEXT: [[V_CVT_I32_F64_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %1:vreg_64_align2, 0, 0, implicit $exec, implicit $mode + ; GCN-NEXT: [[V_CVT_I32_F64_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %1:vreg_64_align2, 0, 0, implicit $exec, implicit $mode ; GCN-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e64_]] ; GCN-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e64_1]] ; GCN-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e64_2]] ; GCN-NEXT: S_ENDPGM 0 - %1:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %0:vreg_64, 0, 0, implicit $exec, implicit $mode - %2:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %0:vreg_64, 0, 0, implicit $exec, implicit $mode - %3:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %0:vreg_64, 0, 0, implicit $exec, implicit $mode + %1:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %0:vreg_64_align2, 0, 0, implicit $exec, implicit $mode + %2:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %0:vreg_64_align2, 0, 0, implicit $exec, implicit $mode + %3:vgpr_32 = nofpexcept V_CVT_I32_F64_e64 0, undef %0:vreg_64_align2, 0, 0, implicit $exec, implicit $mode S_NOP 0, implicit %1 S_NOP 0, implicit %2 S_NOP 0, implicit %3 diff --git a/llvm/test/MachineVerifier/AMDGPU/unsupported-unaligned-vgpr-check-vsrc-operand.mir b/llvm/test/MachineVerifier/AMDGPU/unsupported-unaligned-vgpr-check-vsrc-operand.mir new file mode 100644 index 0000000000000..b4652f2b519e7 --- /dev/null +++ b/llvm/test/MachineVerifier/AMDGPU/unsupported-unaligned-vgpr-check-vsrc-operand.mir @@ -0,0 +1,34 @@ +# RUN: not --crash llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -run-pass=none -filetype=null %s 2>&1 | FileCheck -implicit-check-not="Bad machine code" %s + +# 64-bit vsrc operands were not correctly diagnosed with unaligned registers. 
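+# On subtargets with the GFX90A register-file layout (gfx90a, gfx942), 64-bit and wider VGPR tuples must start at an even-numbered register: $vgpr1_vgpr2 below is misaligned, while $vgpr0_vgpr1 or $vgpr2_vgpr3 would verify cleanly. +# The V_CMP source operands are vsrc operands (they also accept inline immediates), which is exactly the case the verifier previously skipped.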
+ --- +name: uses_unaligned_physreg +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr1_vgpr2 + + ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers *** + ; CHECK: - instruction: $vcc = V_CMP_NE_U64_e64 0, $vgpr1_vgpr2, implicit $exec + + $vcc = V_CMP_NE_U64_e64 0, $vgpr1_vgpr2, implicit $exec + + ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers *** + ; CHECK: *** Bad machine code: Illegal physical register for instruction *** + ; CHECK: - instruction: V_CMP_NE_U64_e32 0, $vgpr1_vgpr2, implicit-def $vcc, implicit $exec + V_CMP_NE_U64_e32 0, $vgpr1_vgpr2, implicit-def $vcc, implicit $exec + + %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + %1:vgpr_32 = V_MOV_B32_e32 1, implicit $exec + %2:vreg_64 = IMPLICIT_DEF + + ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers *** + ; CHECK: *** Bad machine code: Illegal virtual register for instruction *** + %3:areg_128_align2 = V_MFMA_F32_4X4X1F32_e64 %0, %1, %2, 0, 0, 0, implicit $mode, implicit $exec + %4:vreg_64 = IMPLICIT_DEF + + ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers *** + ; CHECK: *** Bad machine code: Illegal virtual register for instruction *** + %5:vreg_128_align2 = V_MFMA_F32_4X4X1F32_vgprcd_e64 %0, %1, %4, 0, 0, 0, implicit $mode, implicit $exec +... From 0eb1f3aaf4d770c7be772b63d4312edb55b5fdcc Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Sun, 24 Aug 2025 09:16:38 +0900 Subject: [PATCH 2/2] AMDGPU: Stop checking if registers are reserved in adjustAllocatableRegClass This function is used to implement TargetInstrInfo::getOpRegClass and conceptually should not depend on the dynamic state of the machine function, such as whether its reserved registers have been frozen yet. --- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 2 +- .../AMDGPU/amdgpu-branch-weight-metadata.ll | 36 ++-- ...tor-flatscratchinit-undefined-behavior2.ll | 11 +- .../AMDGPU/av-split-dead-valno-crash.ll | 6 +- .../CodeGen/AMDGPU/buffer-atomic-fadd.f64.ll | 32 ++-- .../CodeGen/AMDGPU/flat-atomic-fadd.f64.ll | 6 +- llvm/test/CodeGen/AMDGPU/flat-scratch.ll | 18 +- .../CodeGen/AMDGPU/fp64-atomics-gfx90a.ll | 156 +++++++++--------- .../CodeGen/AMDGPU/gep-const-address-space.ll | 12 +- .../AMDGPU/global-atomic-fadd.f32-no-rtn.ll | 2 +- .../CodeGen/AMDGPU/global-atomic-fadd.f64.ll | 4 +- .../CodeGen/AMDGPU/global-i16-load-store.ll | 6 +- .../AMDGPU/infer-addrspace-flat-atomic.ll | 12 +- llvm/test/CodeGen/AMDGPU/mad_64_32.ll | 6 +- ...al-regcopy-and-spill-missed-at-regalloc.ll | 8 +- .../AMDGPU/preload-implicit-kernargs.ll | 12 +- .../CodeGen/AMDGPU/preload-kernarg-header.ll | 3 +- llvm/test/CodeGen/AMDGPU/preload-kernargs.ll | 24 +-- .../AMDGPU/ptradd-sdag-optimizations.ll | 13 +- llvm/test/CodeGen/AMDGPU/store-to-constant.ll | 20 +-- .../AMDGPU/tuple-allocation-failure.ll | 10 +- .../AMDGPU/undef-handling-crash-in-ra.ll | 65 ++++---- .../CodeGen/AMDGPU/vector_shuffle.packed.ll | 8 +- 23 files changed, 237 insertions(+), 235 deletions(-) diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index ba37bdb203a7f..3b5b3687967c9 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -5942,7 +5942,7 @@ adjustAllocatableRegClass(const GCNSubtarget &ST, const SIRegisterInfo &RI, const MachineRegisterInfo &MRI, const MCInstrDesc &TID, unsigned RCID, bool IsAllocatable) { - if ((IsAllocatable || !ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) && + if ((IsAllocatable || !ST.hasGFX90AInsts()) && (((TID.mayLoad() || TID.mayStore()) &&
!(TID.TSFlags & SIInstrFlags::Spill)) || (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::MIMG)))) { diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-branch-weight-metadata.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-branch-weight-metadata.ll index 1da8cd6646d53..9666085341d83 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-branch-weight-metadata.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-branch-weight-metadata.ll @@ -14,9 +14,9 @@ define void @uniform_br_no_metadata(i32 noundef inreg %value, ptr addrspace(8) n ; GFX9-NEXT: s_mov_b32 s6, s19 ; GFX9-NEXT: s_mov_b32 s5, s18 ; GFX9-NEXT: s_mov_b32 s4, s17 -; GFX9-NEXT: v_mov_b32_e32 v0, s16 -; GFX9-NEXT: v_mov_b32_e32 v1, s21 -; GFX9-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen +; GFX9-NEXT: v_mov_b32_e32 v1, s16 +; GFX9-NEXT: v_mov_b32_e32 v0, s21 +; GFX9-NEXT: buffer_store_dword v1, v0, s[4:7], 0 offen ; GFX9-NEXT: .LBB0_2: ; %if.end ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: s_setpc_b64 s[30:31] @@ -61,9 +61,9 @@ define void @uniform_br_same_weight(i32 noundef inreg %value, ptr addrspace(8) n ; GFX9-NEXT: s_mov_b32 s6, s19 ; GFX9-NEXT: s_mov_b32 s5, s18 ; GFX9-NEXT: s_mov_b32 s4, s17 -; GFX9-NEXT: v_mov_b32_e32 v0, s16 -; GFX9-NEXT: v_mov_b32_e32 v1, s21 -; GFX9-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen +; GFX9-NEXT: v_mov_b32_e32 v1, s16 +; GFX9-NEXT: v_mov_b32_e32 v0, s21 +; GFX9-NEXT: buffer_store_dword v1, v0, s[4:7], 0 offen ; GFX9-NEXT: .LBB1_2: ; %if.end ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: s_setpc_b64 s[30:31] @@ -108,9 +108,9 @@ define void @uniform_br_then_likely(i32 noundef inreg %value, ptr addrspace(8) n ; GFX9-NEXT: s_mov_b32 s6, s19 ; GFX9-NEXT: s_mov_b32 s5, s18 ; GFX9-NEXT: s_mov_b32 s4, s17 -; GFX9-NEXT: v_mov_b32_e32 v0, s16 -; GFX9-NEXT: v_mov_b32_e32 v1, s21 -; GFX9-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen +; GFX9-NEXT: v_mov_b32_e32 v1, s16 +; GFX9-NEXT: v_mov_b32_e32 v0, s21 +; GFX9-NEXT: buffer_store_dword v1, v0, s[4:7], 0 offen ; GFX9-NEXT: .LBB2_2: ; %if.end ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: s_setpc_b64 s[30:31] @@ -156,9 +156,9 @@ define void @divergent_br_no_metadata(i32 noundef inreg %value, ptr addrspace(8) ; GFX9-NEXT: s_mov_b32 s6, s19 ; GFX9-NEXT: s_mov_b32 s5, s18 ; GFX9-NEXT: s_mov_b32 s4, s17 -; GFX9-NEXT: v_mov_b32_e32 v0, s16 -; GFX9-NEXT: v_mov_b32_e32 v1, s21 -; GFX9-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen +; GFX9-NEXT: v_mov_b32_e32 v1, s16 +; GFX9-NEXT: v_mov_b32_e32 v0, s21 +; GFX9-NEXT: buffer_store_dword v1, v0, s[4:7], 0 offen ; GFX9-NEXT: .LBB3_2: ; %if.end ; GFX9-NEXT: s_or_b64 exec, exec, s[8:9] ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -227,9 +227,9 @@ define void @divergent_br_same_weight(i32 noundef inreg %value, ptr addrspace(8) ; GFX9-NEXT: s_mov_b32 s6, s19 ; GFX9-NEXT: s_mov_b32 s5, s18 ; GFX9-NEXT: s_mov_b32 s4, s17 -; GFX9-NEXT: v_mov_b32_e32 v0, s16 -; GFX9-NEXT: v_mov_b32_e32 v1, s21 -; GFX9-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen +; GFX9-NEXT: v_mov_b32_e32 v1, s16 +; GFX9-NEXT: v_mov_b32_e32 v0, s21 +; GFX9-NEXT: buffer_store_dword v1, v0, s[4:7], 0 offen ; GFX9-NEXT: .LBB4_2: ; %if.end ; GFX9-NEXT: s_or_b64 exec, exec, s[8:9] ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -297,9 +297,9 @@ define void @divergent_br_then_likely(i32 noundef inreg %value, ptr addrspace(8) ; GFX9-NEXT: s_mov_b32 s6, s19 ; GFX9-NEXT: s_mov_b32 s5, s18 ; GFX9-NEXT: s_mov_b32 s4, s17 -; GFX9-NEXT: v_mov_b32_e32 v0, s16 -; GFX9-NEXT: v_mov_b32_e32 v1, s21 -; GFX9-NEXT: 
buffer_store_dword v0, v1, s[4:7], 0 offen +; GFX9-NEXT: v_mov_b32_e32 v1, s16 +; GFX9-NEXT: v_mov_b32_e32 v0, s21 +; GFX9-NEXT: buffer_store_dword v1, v0, s[4:7], 0 offen ; GFX9-NEXT: ; %bb.2: ; %if.end ; GFX9-NEXT: s_or_b64 exec, exec, s[8:9] ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/attributor-flatscratchinit-undefined-behavior2.ll b/llvm/test/CodeGen/AMDGPU/attributor-flatscratchinit-undefined-behavior2.ll index 51caa84450ff3..8b2f0a7ed63c1 100644 --- a/llvm/test/CodeGen/AMDGPU/attributor-flatscratchinit-undefined-behavior2.ll +++ b/llvm/test/CodeGen/AMDGPU/attributor-flatscratchinit-undefined-behavior2.ll @@ -166,13 +166,14 @@ define amdgpu_kernel void @with_private_to_flat_addrspacecast_cc_kernel(ptr addr ; GFX942-ARCH-FLAT: ; %bb.0: ; GFX942-ARCH-FLAT-NEXT: s_load_dword s2, s[4:5], 0x0 ; GFX942-ARCH-FLAT-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX942-ARCH-FLAT-NEXT: v_mov_b32_e32 v2, 0 +; GFX942-ARCH-FLAT-NEXT: s_mov_b32 s0, 0 +; GFX942-ARCH-FLAT-NEXT: v_mov_b32_e32 v2, s0 ; GFX942-ARCH-FLAT-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-ARCH-FLAT-NEXT: s_cmp_lg_u32 s2, -1 -; GFX942-ARCH-FLAT-NEXT: s_cselect_b32 s0, s1, 0 -; GFX942-ARCH-FLAT-NEXT: s_cselect_b32 s1, s2, 0 -; GFX942-ARCH-FLAT-NEXT: v_mov_b32_e32 v0, s1 -; GFX942-ARCH-FLAT-NEXT: v_mov_b32_e32 v1, s0 +; GFX942-ARCH-FLAT-NEXT: s_cselect_b32 s1, s1, 0 +; GFX942-ARCH-FLAT-NEXT: s_cselect_b32 s2, s2, 0 +; GFX942-ARCH-FLAT-NEXT: v_mov_b32_e32 v0, s2 +; GFX942-ARCH-FLAT-NEXT: v_mov_b32_e32 v1, s1 ; GFX942-ARCH-FLAT-NEXT: flat_store_dword v[0:1], v2 sc0 sc1 ; GFX942-ARCH-FLAT-NEXT: s_waitcnt vmcnt(0) ; GFX942-ARCH-FLAT-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll index 614b1e38a530f..37040123ee20c 100644 --- a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll +++ b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll @@ -8,7 +8,7 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: s_load_dwordx2 s[10:11], s[4:5], 0x0 ; CHECK-NEXT: s_load_dwordx4 s[12:15], s[4:5], 0x10 ; CHECK-NEXT: v_mov_b32_e32 v1, 0x3e21eeb6 -; CHECK-NEXT: v_mov_b32_e32 v2, 0xa17f65f6 +; CHECK-NEXT: v_mov_b32_e32 v20, 0 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) ; CHECK-NEXT: s_bitcmp1_b32 s0, 0 ; CHECK-NEXT: s_cselect_b64 s[16:17], -1, 0 @@ -22,6 +22,7 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: s_xor_b64 s[20:21], s[2:3], -1 ; CHECK-NEXT: s_and_b64 s[2:3], exec, s[2:3] ; CHECK-NEXT: v_accvgpr_write_b32 a2, v0 +; CHECK-NEXT: v_mov_b32_e32 v2, 0xa17f65f6 ; CHECK-NEXT: v_mov_b32_e32 v3, 0xbe927e4f ; CHECK-NEXT: v_mov_b32_e32 v4, 0x19f4ec90 ; CHECK-NEXT: v_mov_b32_e32 v5, 0x3efa01a0 @@ -42,7 +43,7 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: v_mov_b32_e32 v18, 0x55555523 ; CHECK-NEXT: v_mov_b32_e32 v19, 0xbfd55555 ; CHECK-NEXT: s_and_b64 s[6:7], exec, s[18:19] -; CHECK-NEXT: v_mov_b32_e32 v20, 0 +; CHECK-NEXT: v_mov_b32_e32 v21, v20 ; CHECK-NEXT: ; implicit-def: $vgpr30_vgpr31 ; CHECK-NEXT: ; implicit-def: $vgpr22_vgpr23 ; CHECK-NEXT: s_branch .LBB0_2 @@ -155,7 +156,6 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: s_cbranch_vccz .LBB0_1 ; CHECK-NEXT: ; %bb.16: ; %._crit_edge2105.i.i.i2330 ; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: v_mov_b32_e32 v21, v20 ; CHECK-NEXT: s_mov_b64 s[24:25], 0 ; CHECK-NEXT: 
global_store_dwordx2 v20, v[20:21], s[12:13] ; CHECK-NEXT: s_branch .LBB0_1 diff --git a/llvm/test/CodeGen/AMDGPU/buffer-atomic-fadd.f64.ll b/llvm/test/CodeGen/AMDGPU/buffer-atomic-fadd.f64.ll index 96b191d5acead..2ce54f8a463c7 100644 --- a/llvm/test/CodeGen/AMDGPU/buffer-atomic-fadd.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/buffer-atomic-fadd.f64.ll @@ -18,7 +18,7 @@ define amdgpu_ps void @buffer_atomic_fadd_f64_offset_no_rtn(double %val, <4 x i3 ; GFX90A_GFX942-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY1]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX90A_GFX942-NEXT: [[COPY7:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE1]] ; GFX90A_GFX942-NEXT: BUFFER_ATOMIC_ADD_F64_OFFSET killed [[COPY7]], killed [[REG_SEQUENCE]], [[COPY]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 0) @@ -40,7 +40,7 @@ define amdgpu_ps void @buffer_atomic_fadd_f64_offen_no_rtn(double %val, <4 x i32 ; GFX90A_GFX942-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY2]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE1]] ; GFX90A_GFX942-NEXT: BUFFER_ATOMIC_ADD_F64_OFFEN killed [[COPY8]], [[COPY1]], killed [[REG_SEQUENCE]], [[COPY]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) @@ -62,7 +62,7 @@ define amdgpu_ps void @buffer_atomic_fadd_f64_idxen_no_rtn(double %val, <4 x i32 ; GFX90A_GFX942-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY2]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE1]] ; GFX90A_GFX942-NEXT: BUFFER_ATOMIC_ADD_F64_IDXEN killed [[COPY8]], [[COPY1]], killed [[REG_SEQUENCE]], [[COPY]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %val, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0) @@ -86,7 +86,7 @@ define amdgpu_ps void @buffer_atomic_fadd_f64_bothen_no_rtn(double %val, <4 x i3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY3]], %subreg.sub3 ; GFX90A_GFX942-NEXT: 
[[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX90A_GFX942-NEXT: [[COPY9:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE1]] ; GFX90A_GFX942-NEXT: BUFFER_ATOMIC_ADD_F64_BOTHEN killed [[COPY9]], killed [[REG_SEQUENCE2]], killed [[REG_SEQUENCE]], [[COPY]], 0, 2, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2) @@ -107,7 +107,7 @@ define amdgpu_ps double @buffer_atomic_fadd_f64_offset_rtn(double %val, <4 x i32 ; GFX90A_GFX942-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY1]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX90A_GFX942-NEXT: [[COPY7:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE1]] ; GFX90A_GFX942-NEXT: [[BUFFER_ATOMIC_ADD_F64_OFFSET_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_F64_OFFSET_RTN [[COPY7]], killed [[REG_SEQUENCE]], [[COPY]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_F64_OFFSET_RTN]].sub0 ; GFX90A_GFX942-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY8]], implicit $exec @@ -135,7 +135,7 @@ define amdgpu_ps double @buffer_atomic_fadd_f64_offen_rtn(double %val, <4 x i32> ; GFX90A_GFX942-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY2]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE1]] ; GFX90A_GFX942-NEXT: [[BUFFER_ATOMIC_ADD_F64_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_F64_OFFEN_RTN [[COPY8]], [[COPY1]], killed [[REG_SEQUENCE]], [[COPY]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX90A_GFX942-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_F64_OFFEN_RTN]].sub0 ; GFX90A_GFX942-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY9]], implicit $exec @@ -163,7 +163,7 @@ define amdgpu_ps double @buffer_atomic_fadd_f64_idxen_rtn(double %val, <4 x i32> ; GFX90A_GFX942-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY2]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:av_64_align2 = 
COPY [[REG_SEQUENCE1]] ; GFX90A_GFX942-NEXT: [[BUFFER_ATOMIC_ADD_F64_IDXEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_F64_IDXEN_RTN [[COPY8]], [[COPY1]], killed [[REG_SEQUENCE]], [[COPY]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX90A_GFX942-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_F64_IDXEN_RTN]].sub0 ; GFX90A_GFX942-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY9]], implicit $exec @@ -193,7 +193,7 @@ define amdgpu_ps double @buffer_atomic_fadd_f64_bothen_rtn(double %val, <4 x i32 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY3]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX90A_GFX942-NEXT: [[COPY9:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE1]] ; GFX90A_GFX942-NEXT: [[BUFFER_ATOMIC_ADD_F64_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_F64_BOTHEN_RTN [[COPY9]], killed [[REG_SEQUENCE2]], killed [[REG_SEQUENCE]], [[COPY]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX90A_GFX942-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_F64_BOTHEN_RTN]].sub0 ; GFX90A_GFX942-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY10]], implicit $exec @@ -226,7 +226,7 @@ define amdgpu_ps void @buffer_ptr_atomic_fadd_f64_offset_no_rtn(double %val, ptr ; GFX90A_GFX942-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY10]], %subreg.sub0, killed [[COPY9]], %subreg.sub1, killed [[COPY8]], %subreg.sub2, killed [[COPY7]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE3]] + ; GFX90A_GFX942-NEXT: [[COPY11:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE3]] ; GFX90A_GFX942-NEXT: BUFFER_ATOMIC_ADD_F64_OFFSET killed [[COPY11]], killed [[REG_SEQUENCE2]], [[COPY]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64) on %ir.rsrc, align 1, addrspace 8) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f64(double %val, ptr addrspace(8) %rsrc, i32 0, i32 %soffset, i32 0) @@ -254,7 +254,7 @@ define amdgpu_ps void @buffer_ptr_atomic_fadd_f64_offen_no_rtn(double %val, ptr ; GFX90A_GFX942-NEXT: [[COPY11:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY11]], %subreg.sub0, killed [[COPY10]], %subreg.sub1, killed [[COPY9]], %subreg.sub2, killed [[COPY8]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE3]] + ; GFX90A_GFX942-NEXT: [[COPY12:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE3]] ; GFX90A_GFX942-NEXT: BUFFER_ATOMIC_ADD_F64_OFFEN killed [[COPY12]], [[COPY1]], killed [[REG_SEQUENCE2]], [[COPY]], 0, 0, implicit $exec :: (volatile dereferenceable load 
store (s64) on %ir.rsrc, align 1, addrspace 8) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f64(double %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0) @@ -282,7 +282,7 @@ define amdgpu_ps void @buffer_ptr_atomic_fadd_f64_idxen_no_rtn(double %val, ptr ; GFX90A_GFX942-NEXT: [[COPY11:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY11]], %subreg.sub0, killed [[COPY10]], %subreg.sub1, killed [[COPY9]], %subreg.sub2, killed [[COPY8]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE3]] + ; GFX90A_GFX942-NEXT: [[COPY12:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE3]] ; GFX90A_GFX942-NEXT: BUFFER_ATOMIC_ADD_F64_IDXEN killed [[COPY12]], [[COPY1]], killed [[REG_SEQUENCE2]], [[COPY]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64) on %ir.rsrc, align 1, addrspace 8) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f64(double %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0) @@ -312,7 +312,7 @@ define amdgpu_ps void @buffer_ptr_atomic_fadd_f64_bothen_no_rtn(double %val, ptr ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY12]], %subreg.sub0, killed [[COPY11]], %subreg.sub1, killed [[COPY10]], %subreg.sub2, killed [[COPY9]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE3]] + ; GFX90A_GFX942-NEXT: [[COPY13:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE3]] ; GFX90A_GFX942-NEXT: BUFFER_ATOMIC_ADD_F64_BOTHEN killed [[COPY13]], killed [[REG_SEQUENCE4]], killed [[REG_SEQUENCE2]], [[COPY]], 0, 2, implicit $exec :: (volatile dereferenceable load store (s64) on %ir.rsrc, align 1, addrspace 8) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f64(double %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2) @@ -339,7 +339,7 @@ define amdgpu_ps double @buffer_ptr_atomic_fadd_f64_offset_rtn(double %val, ptr ; GFX90A_GFX942-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY10]], %subreg.sub0, killed [[COPY9]], %subreg.sub1, killed [[COPY8]], %subreg.sub2, killed [[COPY7]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE3]] + ; GFX90A_GFX942-NEXT: [[COPY11:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE3]] ; GFX90A_GFX942-NEXT: [[BUFFER_ATOMIC_ADD_F64_OFFSET_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_F64_OFFSET_RTN [[COPY11]], killed [[REG_SEQUENCE2]], [[COPY]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64) on %ir.rsrc, align 1, addrspace 8) ; GFX90A_GFX942-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_F64_OFFSET_RTN]].sub0 ; GFX90A_GFX942-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY12]], implicit 
$exec @@ -373,7 +373,7 @@ define amdgpu_ps double @buffer_ptr_atomic_fadd_f64_offen_rtn(double %val, ptr a ; GFX90A_GFX942-NEXT: [[COPY11:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY11]], %subreg.sub0, killed [[COPY10]], %subreg.sub1, killed [[COPY9]], %subreg.sub2, killed [[COPY8]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE3]] + ; GFX90A_GFX942-NEXT: [[COPY12:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE3]] ; GFX90A_GFX942-NEXT: [[BUFFER_ATOMIC_ADD_F64_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_F64_OFFEN_RTN [[COPY12]], [[COPY1]], killed [[REG_SEQUENCE2]], [[COPY]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64) on %ir.rsrc, align 1, addrspace 8) ; GFX90A_GFX942-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_F64_OFFEN_RTN]].sub0 ; GFX90A_GFX942-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY13]], implicit $exec @@ -407,7 +407,7 @@ define amdgpu_ps double @buffer_ptr_atomic_fadd_f64_idxen_rtn(double %val, ptr a ; GFX90A_GFX942-NEXT: [[COPY11:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY11]], %subreg.sub0, killed [[COPY10]], %subreg.sub1, killed [[COPY9]], %subreg.sub2, killed [[COPY8]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE3]] + ; GFX90A_GFX942-NEXT: [[COPY12:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE3]] ; GFX90A_GFX942-NEXT: [[BUFFER_ATOMIC_ADD_F64_IDXEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_F64_IDXEN_RTN [[COPY12]], [[COPY1]], killed [[REG_SEQUENCE2]], [[COPY]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64) on %ir.rsrc, align 1, addrspace 8) ; GFX90A_GFX942-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_F64_IDXEN_RTN]].sub0 ; GFX90A_GFX942-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY13]], implicit $exec @@ -443,7 +443,7 @@ define amdgpu_ps double @buffer_ptr_atomic_fadd_f64_bothen_rtn(double %val, ptr ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY12]], %subreg.sub0, killed [[COPY11]], %subreg.sub1, killed [[COPY10]], %subreg.sub2, killed [[COPY9]], %subreg.sub3 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX90A_GFX942-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE3]] + ; GFX90A_GFX942-NEXT: [[COPY13:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE3]] ; GFX90A_GFX942-NEXT: [[BUFFER_ATOMIC_ADD_F64_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_F64_BOTHEN_RTN [[COPY13]], killed [[REG_SEQUENCE4]], killed [[REG_SEQUENCE2]], [[COPY]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64) on %ir.rsrc, align 1, addrspace 8) ; GFX90A_GFX942-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_F64_BOTHEN_RTN]].sub0 ; GFX90A_GFX942-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY14]], implicit $exec 
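The vreg_64_align2 -> av_64_align2 churn in the MIR checks above, and in the remaining test diffs below, falls out of the one-line guard change in adjustAllocatableRegClass. A condensed before/after view of that guard, paraphrasing the SIInstrInfo.cpp hunk and eliding the unchanged mayLoad/mayStore clause:

    // Before: the guard was also satisfied whenever reserved registers were
    // not yet frozen, so early and late queries could disagree on GFX90A+.
    if ((IsAllocatable || !ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) && ...)

    // After: the result depends only on the subtarget and the MCInstrDesc, so
    // getOpRegClass() reports the AV superclass consistently, which is what
    // the updated COPY register classes in these tests reflect.
    if ((IsAllocatable || !ST.hasGFX90AInsts()) && ...)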
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomic-fadd.f64.ll b/llvm/test/CodeGen/AMDGPU/flat-atomic-fadd.f64.ll index 370b43a7f436c..2049b4dabffd8 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-atomic-fadd.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-atomic-fadd.f64.ll @@ -18,7 +18,7 @@ define amdgpu_ps void @flat_atomic_fadd_f64_no_rtn_intrinsic(ptr %ptr, double %d ; GFX90A_GFX942-NEXT: [[DEF3:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1 ; GFX90A_GFX942-NEXT: [[COPY4:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] - ; GFX90A_GFX942-NEXT: [[COPY5:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX90A_GFX942-NEXT: [[COPY5:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE]] ; GFX90A_GFX942-NEXT: FLAT_ATOMIC_ADD_F64 killed [[COPY4]], killed [[COPY5]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s64) on %ir.ptr) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = call double @llvm.amdgcn.flat.atomic.fadd.f64.p1.f64(ptr %ptr, double %data) @@ -70,7 +70,7 @@ define amdgpu_ps void @flat_atomic_fadd_f64_no_rtn_atomicrmw(ptr %ptr, double %d ; GFX90A_GFX942-NEXT: [[DEF3:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1 ; GFX90A_GFX942-NEXT: [[COPY4:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] - ; GFX90A_GFX942-NEXT: [[COPY5:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX90A_GFX942-NEXT: [[COPY5:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE]] ; GFX90A_GFX942-NEXT: FLAT_ATOMIC_ADD_F64 killed [[COPY4]], killed [[COPY5]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0 @@ -93,7 +93,7 @@ define amdgpu_ps void @flat_atomic_fadd_f64_no_rtn_atomicrmw_noprivate(ptr %ptr, ; GFX90A_GFX942-NEXT: [[DEF3:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1 ; GFX90A_GFX942-NEXT: [[COPY4:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] - ; GFX90A_GFX942-NEXT: [[COPY5:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX90A_GFX942-NEXT: [[COPY5:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE]] ; GFX90A_GFX942-NEXT: FLAT_ATOMIC_ADD_F64 killed [[COPY4]], killed [[COPY5]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr) ; GFX90A_GFX942-NEXT: S_ENDPGM 0 %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0 diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll index fc8883924dfbc..fadcc39c95f47 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll @@ -463,7 +463,8 @@ define amdgpu_kernel void @store_load_sindex_kernel(i32 %idx) { ; GFX942-LABEL: store_load_sindex_kernel: ; GFX942: ; %bb.0: ; %bb ; GFX942-NEXT: s_load_dword s0, s[4:5], 0x24 -; GFX942-NEXT: v_mov_b32_e32 v0, 15 +; GFX942-NEXT: s_mov_b32 s1, 15 +; GFX942-NEXT: v_mov_b32_e32 v0, s1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: s_lshl_b32 s1, s0, 2 ; GFX942-NEXT: s_and_b32 s0, s0, 15 @@ -610,8 +611,9 @@ define amdgpu_ps void 
@store_load_sindex_foo(i32 inreg %idx) { ; ; GFX942-LABEL: store_load_sindex_foo: ; GFX942: ; %bb.0: ; %bb +; GFX942-NEXT: s_mov_b32 s2, 15 ; GFX942-NEXT: s_lshl_b32 s1, s0, 2 -; GFX942-NEXT: v_mov_b32_e32 v0, 15 +; GFX942-NEXT: v_mov_b32_e32 v0, s2 ; GFX942-NEXT: s_and_b32 s0, s0, 15 ; GFX942-NEXT: scratch_store_dword off, v0, s1 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -1588,7 +1590,8 @@ define amdgpu_kernel void @store_load_sindex_small_offset_kernel(i32 %idx) { ; GFX942-NEXT: s_load_dword s0, s[4:5], 0x24 ; GFX942-NEXT: scratch_load_dword v0, off, off sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_mov_b32_e32 v0, 15 +; GFX942-NEXT: s_mov_b32 s1, 15 +; GFX942-NEXT: v_mov_b32_e32 v0, s1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: s_lshl_b32 s1, s0, 2 ; GFX942-NEXT: s_and_b32 s0, s0, 15 @@ -1805,9 +1808,10 @@ define amdgpu_ps void @store_load_sindex_small_offset_foo(i32 inreg %idx) { ; GFX942-NEXT: scratch_load_dword v0, off, off sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_lshl_b32 s1, s0, 2 +; GFX942-NEXT: s_mov_b32 s2, 15 ; GFX942-NEXT: s_and_b32 s0, s0, 15 ; GFX942-NEXT: s_addk_i32 s1, 0x100 -; GFX942-NEXT: v_mov_b32_e32 v0, 15 +; GFX942-NEXT: v_mov_b32_e32 v0, s2 ; GFX942-NEXT: s_lshl_b32 s0, s0, 2 ; GFX942-NEXT: scratch_store_dword off, v0, s1 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -2884,7 +2888,8 @@ define amdgpu_kernel void @store_load_sindex_large_offset_kernel(i32 %idx) { ; GFX942-NEXT: s_load_dword s0, s[4:5], 0x24 ; GFX942-NEXT: scratch_load_dword v0, off, off offset:4 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_mov_b32_e32 v0, 15 +; GFX942-NEXT: s_mov_b32 s1, 15 +; GFX942-NEXT: v_mov_b32_e32 v0, s1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: s_lshl_b32 s1, s0, 2 ; GFX942-NEXT: s_and_b32 s0, s0, 15 @@ -3101,9 +3106,10 @@ define amdgpu_ps void @store_load_sindex_large_offset_foo(i32 inreg %idx) { ; GFX942-NEXT: scratch_load_dword v0, off, off offset:4 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_lshl_b32 s1, s0, 2 +; GFX942-NEXT: s_mov_b32 s2, 15 ; GFX942-NEXT: s_and_b32 s0, s0, 15 ; GFX942-NEXT: s_addk_i32 s1, 0x4004 -; GFX942-NEXT: v_mov_b32_e32 v0, 15 +; GFX942-NEXT: v_mov_b32_e32 v0, s2 ; GFX942-NEXT: s_lshl_b32 s0, s0, 2 ; GFX942-NEXT: scratch_store_dword off, v0, s1 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll index 0cb2b0b7df3d2..3856f0c327495 100644 --- a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll +++ b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll @@ -24,9 +24,9 @@ define amdgpu_kernel void @raw_buffer_atomic_add_noret_f64(<4 x i32> %rsrc, doub ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1] -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_mov_b32_e32 v0, s8 +; GFX90A-NEXT: buffer_atomic_add_f64 v[2:3], v0, s[0:3], 0 offen ; GFX90A-NEXT: s_endpgm ; ; GFX942-LABEL: raw_buffer_atomic_add_noret_f64: @@ -35,9 +35,9 @@ define amdgpu_kernel void @raw_buffer_atomic_add_noret_f64(<4 x i32> %rsrc, doub ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX942-NEXT: v_mov_b32_e32 
v2, s8 -; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7] +; GFX942-NEXT: v_mov_b32_e32 v0, s8 +; GFX942-NEXT: buffer_atomic_add_f64 v[2:3], v0, s[0:3], 0 offen ; GFX942-NEXT: s_endpgm ; ; GFX1250-LABEL: raw_buffer_atomic_add_noret_f64: @@ -143,9 +143,9 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_add_noret_f64(ptr addrspace(8) ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1] -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_mov_b32_e32 v0, s8 +; GFX90A-NEXT: buffer_atomic_add_f64 v[2:3], v0, s[0:3], 0 offen ; GFX90A-NEXT: s_endpgm ; ; GFX942-LABEL: raw_ptr_buffer_atomic_add_noret_f64: @@ -154,9 +154,9 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_add_noret_f64(ptr addrspace(8) ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX942-NEXT: v_mov_b32_e32 v2, s8 -; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7] +; GFX942-NEXT: v_mov_b32_e32 v0, s8 +; GFX942-NEXT: buffer_atomic_add_f64 v[2:3], v0, s[0:3], 0 offen ; GFX942-NEXT: s_endpgm ; ; GFX1250-LABEL: raw_ptr_buffer_atomic_add_noret_f64: @@ -262,9 +262,9 @@ define amdgpu_kernel void @struct_buffer_atomic_add_noret_f64(<4 x i32> %rsrc, d ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1] -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_mov_b32_e32 v0, s8 +; GFX90A-NEXT: buffer_atomic_add_f64 v[2:3], v0, s[0:3], 0 idxen ; GFX90A-NEXT: s_endpgm ; ; GFX942-LABEL: struct_buffer_atomic_add_noret_f64: @@ -273,9 +273,9 @@ define amdgpu_kernel void @struct_buffer_atomic_add_noret_f64(<4 x i32> %rsrc, d ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX942-NEXT: v_mov_b32_e32 v2, s8 -; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7] +; GFX942-NEXT: v_mov_b32_e32 v0, s8 +; GFX942-NEXT: buffer_atomic_add_f64 v[2:3], v0, s[0:3], 0 idxen ; GFX942-NEXT: s_endpgm ; ; GFX1250-LABEL: struct_buffer_atomic_add_noret_f64: @@ -380,9 +380,9 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_add_noret_f64(ptr addrspace( ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1] -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_mov_b32_e32 v0, s8 +; GFX90A-NEXT: buffer_atomic_add_f64 v[2:3], v0, s[0:3], 0 idxen ; GFX90A-NEXT: s_endpgm ; ; GFX942-LABEL: struct_ptr_buffer_atomic_add_noret_f64: @@ -391,9 +391,9 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_add_noret_f64(ptr 
addrspace( ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX942-NEXT: v_mov_b32_e32 v2, s8 -; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7] +; GFX942-NEXT: v_mov_b32_e32 v0, s8 +; GFX942-NEXT: buffer_atomic_add_f64 v[2:3], v0, s[0:3], 0 idxen ; GFX942-NEXT: s_endpgm ; ; GFX1250-LABEL: struct_ptr_buffer_atomic_add_noret_f64: @@ -498,9 +498,9 @@ define amdgpu_kernel void @raw_buffer_atomic_min_noret_f64(<4 x i32> %rsrc, doub ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1] -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 offen +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_mov_b32_e32 v0, s8 +; GFX90A-NEXT: buffer_atomic_min_f64 v[2:3], v0, s[0:3], 0 offen ; GFX90A-NEXT: s_endpgm ; ; GFX942-LABEL: raw_buffer_atomic_min_noret_f64: @@ -509,9 +509,9 @@ define amdgpu_kernel void @raw_buffer_atomic_min_noret_f64(<4 x i32> %rsrc, doub ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX942-NEXT: v_mov_b32_e32 v2, s8 -; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 offen +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7] +; GFX942-NEXT: v_mov_b32_e32 v0, s8 +; GFX942-NEXT: buffer_atomic_min_f64 v[2:3], v0, s[0:3], 0 offen ; GFX942-NEXT: s_endpgm ; ; GFX1250-LABEL: raw_buffer_atomic_min_noret_f64: @@ -617,9 +617,9 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_min_noret_f64(ptr addrspace(8) ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1] -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 offen +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_mov_b32_e32 v0, s8 +; GFX90A-NEXT: buffer_atomic_min_f64 v[2:3], v0, s[0:3], 0 offen ; GFX90A-NEXT: s_endpgm ; ; GFX942-LABEL: raw_ptr_buffer_atomic_min_noret_f64: @@ -628,9 +628,9 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_min_noret_f64(ptr addrspace(8) ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX942-NEXT: v_mov_b32_e32 v2, s8 -; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 offen +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7] +; GFX942-NEXT: v_mov_b32_e32 v0, s8 +; GFX942-NEXT: buffer_atomic_min_f64 v[2:3], v0, s[0:3], 0 offen ; GFX942-NEXT: s_endpgm ; ; GFX1250-LABEL: raw_ptr_buffer_atomic_min_noret_f64: @@ -736,9 +736,9 @@ define amdgpu_kernel void @struct_buffer_atomic_min_noret_f64(<4 x i32> %rsrc, d ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1] -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 idxen +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_mov_b32_e32 v0, s8 +; GFX90A-NEXT: 
buffer_atomic_min_f64 v[2:3], v0, s[0:3], 0 idxen ; GFX90A-NEXT: s_endpgm ; ; GFX942-LABEL: struct_buffer_atomic_min_noret_f64: @@ -747,9 +747,9 @@ define amdgpu_kernel void @struct_buffer_atomic_min_noret_f64(<4 x i32> %rsrc, d ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX942-NEXT: v_mov_b32_e32 v2, s8 -; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 idxen +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7] +; GFX942-NEXT: v_mov_b32_e32 v0, s8 +; GFX942-NEXT: buffer_atomic_min_f64 v[2:3], v0, s[0:3], 0 idxen ; GFX942-NEXT: s_endpgm ; ; GFX1250-LABEL: struct_buffer_atomic_min_noret_f64: @@ -854,9 +854,9 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_min_noret_f64(ptr addrspace( ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1] -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 idxen +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_mov_b32_e32 v0, s8 +; GFX90A-NEXT: buffer_atomic_min_f64 v[2:3], v0, s[0:3], 0 idxen ; GFX90A-NEXT: s_endpgm ; ; GFX942-LABEL: struct_ptr_buffer_atomic_min_noret_f64: @@ -865,9 +865,9 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_min_noret_f64(ptr addrspace( ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX942-NEXT: v_mov_b32_e32 v2, s8 -; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 idxen +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7] +; GFX942-NEXT: v_mov_b32_e32 v0, s8 +; GFX942-NEXT: buffer_atomic_min_f64 v[2:3], v0, s[0:3], 0 idxen ; GFX942-NEXT: s_endpgm ; ; GFX1250-LABEL: struct_ptr_buffer_atomic_min_noret_f64: @@ -972,9 +972,9 @@ define amdgpu_kernel void @raw_buffer_atomic_max_noret_f64(<4 x i32> %rsrc, doub ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1] -; GFX90A-NEXT: v_mov_b32_e32 v2, s8 -; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 offen +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_mov_b32_e32 v0, s8 +; GFX90A-NEXT: buffer_atomic_max_f64 v[2:3], v0, s[0:3], 0 offen ; GFX90A-NEXT: s_endpgm ; ; GFX942-LABEL: raw_buffer_atomic_max_noret_f64: @@ -983,9 +983,9 @@ define amdgpu_kernel void @raw_buffer_atomic_max_noret_f64(<4 x i32> %rsrc, doub ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX942-NEXT: v_mov_b32_e32 v2, s8 -; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 offen +; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7] +; GFX942-NEXT: v_mov_b32_e32 v0, s8 +; GFX942-NEXT: buffer_atomic_max_f64 v[2:3], v0, s[0:3], 0 offen ; GFX942-NEXT: s_endpgm ; ; GFX1250-LABEL: raw_buffer_atomic_max_noret_f64: @@ -1091,9 +1091,9 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_max_noret_f64(ptr addrspace(8) ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1] -; 
GFX90A-NEXT: v_mov_b32_e32 v2, s8
-; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 offen
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_mov_b32_e32 v0, s8
+; GFX90A-NEXT: buffer_atomic_max_f64 v[2:3], v0, s[0:3], 0 offen
 ; GFX90A-NEXT: s_endpgm
 ;
 ; GFX942-LABEL: raw_ptr_buffer_atomic_max_noret_f64:
@@ -1102,9 +1102,9 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_max_noret_f64(ptr addrspace(8)
 ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c
 ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
-; GFX942-NEXT: v_mov_b32_e32 v2, s8
-; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 offen
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7]
+; GFX942-NEXT: v_mov_b32_e32 v0, s8
+; GFX942-NEXT: buffer_atomic_max_f64 v[2:3], v0, s[0:3], 0 offen
 ; GFX942-NEXT: s_endpgm
 ;
 ; GFX1250-LABEL: raw_ptr_buffer_atomic_max_noret_f64:
@@ -1210,9 +1210,9 @@ define amdgpu_kernel void @struct_buffer_atomic_max_noret_f64(<4 x i32> %rsrc, d
 ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c
 ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1]
-; GFX90A-NEXT: v_mov_b32_e32 v2, s8
-; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 idxen
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_mov_b32_e32 v0, s8
+; GFX90A-NEXT: buffer_atomic_max_f64 v[2:3], v0, s[0:3], 0 idxen
 ; GFX90A-NEXT: s_endpgm
 ;
 ; GFX942-LABEL: struct_buffer_atomic_max_noret_f64:
@@ -1221,9 +1221,9 @@ define amdgpu_kernel void @struct_buffer_atomic_max_noret_f64(<4 x i32> %rsrc, d
 ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c
 ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
-; GFX942-NEXT: v_mov_b32_e32 v2, s8
-; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 idxen
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7]
+; GFX942-NEXT: v_mov_b32_e32 v0, s8
+; GFX942-NEXT: buffer_atomic_max_f64 v[2:3], v0, s[0:3], 0 idxen
 ; GFX942-NEXT: s_endpgm
 ;
 ; GFX1250-LABEL: struct_buffer_atomic_max_noret_f64:
@@ -1328,9 +1328,9 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_max_noret_f64(ptr addrspace(
 ; GFX90A-NEXT: s_load_dword s8, s[4:5], 0x3c
 ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1]
-; GFX90A-NEXT: v_mov_b32_e32 v2, s8
-; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 idxen
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_mov_b32_e32 v0, s8
+; GFX90A-NEXT: buffer_atomic_max_f64 v[2:3], v0, s[0:3], 0 idxen
 ; GFX90A-NEXT: s_endpgm
 ;
 ; GFX942-LABEL: struct_ptr_buffer_atomic_max_noret_f64:
@@ -1339,9 +1339,9 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_max_noret_f64(ptr addrspace(
 ; GFX942-NEXT: s_load_dword s8, s[4:5], 0x3c
 ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
-; GFX942-NEXT: v_mov_b32_e32 v2, s8
-; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 idxen
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7]
+; GFX942-NEXT: v_mov_b32_e32 v0, s8
+; GFX942-NEXT: buffer_atomic_max_f64 v[2:3], v0, s[0:3], 0 idxen
 ; GFX942-NEXT: s_endpgm
 ;
 ; GFX1250-LABEL: struct_ptr_buffer_atomic_max_noret_f64:
@@ -2079,9 +2079,9 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret(ptr addrspace(3) %ptr, do
 ; GFX90A-NEXT: s_load_dword s2, s[4:5], 0x24
 ; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2c
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v2, s2
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
-; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
+; GFX90A-NEXT: v_mov_b32_e32 v0, s2
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90A-NEXT: ds_add_f64 v0, v[2:3]
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX90A-NEXT: s_endpgm
 ;
@@ -2090,9 +2090,9 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret(ptr addrspace(3) %ptr, do
 ; GFX942-NEXT: s_load_dword s2, s[4:5], 0x24
 ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2c
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_mov_b32_e32 v2, s2
-; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; GFX942-NEXT: ds_add_f64 v2, v[0:1]
+; GFX942-NEXT: v_mov_b32_e32 v0, s2
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[0:1]
+; GFX942-NEXT: ds_add_f64 v0, v[2:3]
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT: s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/gep-const-address-space.ll b/llvm/test/CodeGen/AMDGPU/gep-const-address-space.ll
index b24ebbd9435cf..9db760077a853 100644
--- a/llvm/test/CodeGen/AMDGPU/gep-const-address-space.ll
+++ b/llvm/test/CodeGen/AMDGPU/gep-const-address-space.ll
@@ -38,9 +38,9 @@ define protected amdgpu_kernel void @IllegalGEPConst(i32 %a, ptr addrspace(1) %b
 ; CHECK-NEXT: s_mov_b64 s[4:5], -1
 ; CHECK-NEXT: s_cbranch_vccz .LBB0_5
 ; CHECK-NEXT: ; %bb.4: ; %atomicrmw.global
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
-; CHECK-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
+; CHECK-NEXT: global_atomic_add_f64 v0, v[2:3], s[0:1]
 ; CHECK-NEXT: s_waitcnt vmcnt(0)
 ; CHECK-NEXT: buffer_wbinvl1_vol
 ; CHECK-NEXT: s_mov_b64 s[4:5], 0
@@ -62,9 +62,9 @@ define protected amdgpu_kernel void @IllegalGEPConst(i32 %a, ptr addrspace(1) %b
 ; CHECK-NEXT: .LBB0_8: ; %atomicrmw.shared
 ; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
 ; CHECK-NEXT: s_cselect_b32 s0, s0, -1
-; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
-; CHECK-NEXT: ds_add_f64 v2, v[0:1]
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
+; CHECK-NEXT: ds_add_f64 v0, v[2:3]
 ; CHECK-NEXT: s_waitcnt lgkmcnt(0)
 ; CHECK-NEXT: s_endpgm
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll
index c2ddce47ffd78..85549b86a9a6c 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll
@@ -146,7 +146,7 @@ define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspa
 ; GFX90A_GFX942-NEXT: successors: %bb.3(0x80000000)
 ; GFX90A_GFX942-NEXT: {{ $}}
 ; GFX90A_GFX942-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY %1
+ ; GFX90A_GFX942-NEXT: [[COPY8:%[0-9]+]]:av_32 = COPY %1
 ; GFX90A_GFX942-NEXT: GLOBAL_ATOMIC_ADD_F32_SADDR killed [[V_MOV_B32_e32_1]], [[COPY8]], [[COPY3]], 0, 0, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
 ; GFX90A_GFX942-NEXT: {{ $}}
 ; GFX90A_GFX942-NEXT: bb.3.Flow:
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f64.ll b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f64.ll
index 682c1cd8060aa..02884559bdaa9 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f64.ll
@@ -14,7 +14,7 @@ define amdgpu_ps void @global_atomic_fadd_f64_no_rtn_atomicrmw(ptr addrspace(1)
 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
 ; GFX90A_GFX942-NEXT: [[COPY4:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
- ; GFX90A_GFX942-NEXT: [[COPY5:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX90A_GFX942-NEXT: [[COPY5:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE]]
 ; GFX90A_GFX942-NEXT: GLOBAL_ATOMIC_ADD_F64 killed [[COPY4]], killed [[COPY5]], 0, 0, implicit $exec :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr, addrspace 1)
 ; GFX90A_GFX942-NEXT: S_ENDPGM 0
 %ret = atomicrmw fadd ptr addrspace(1) %ptr, double %data syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
@@ -105,7 +105,7 @@ define amdgpu_ps void @global_atomic_fadd_f64_saddr_no_rtn_atomicrmw(ptr addrspa
 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
 ; GFX90A_GFX942-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
 ; GFX90A_GFX942-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- ; GFX90A_GFX942-NEXT: [[COPY4:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; GFX90A_GFX942-NEXT: [[COPY4:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE1]]
 ; GFX90A_GFX942-NEXT: GLOBAL_ATOMIC_ADD_F64_SADDR killed [[V_MOV_B32_e32_]], killed [[COPY4]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr, addrspace 1)
 ; GFX90A_GFX942-NEXT: S_ENDPGM 0
 %ret = atomicrmw fadd ptr addrspace(1) %ptr, double %data syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
diff --git a/llvm/test/CodeGen/AMDGPU/global-i16-load-store.ll b/llvm/test/CodeGen/AMDGPU/global-i16-load-store.ll
index f2da966a56fc5..57bfd2490f9da 100644
--- a/llvm/test/CodeGen/AMDGPU/global-i16-load-store.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-i16-load-store.ll
@@ -145,12 +145,12 @@ define amdgpu_kernel void @half4(ptr addrspace(1) nocapture readonly %0, ptr add
 ; GFX90A-LABEL: half4:
 ; GFX90A: ; %bb.0:
 ; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX90A-NEXT: v_mov_b32_e32 v2, 0
+; GFX90A-NEXT: v_mov_b32_e32 v0, 0
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
+; GFX90A-NEXT: global_store_dwordx2 v0, v[2:3], s[2:3]
 ; GFX90A-NEXT: s_endpgm
 ;
 ; GFX1030-LABEL: half4:
diff --git a/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll b/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll
index 0a493e5188ad5..59dfd713ef4fd 100644
--- a/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll
+++ b/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll
@@ -141,9 +141,9 @@ define protected amdgpu_kernel void @InferPHI(i32 %a, ptr addrspace(1) %b, doubl
 ; CHECK-NEXT: s_mov_b64 s[0:1], -1
 ; CHECK-NEXT: s_cbranch_vccz .LBB3_7
 ; CHECK-NEXT: ; %bb.6: ; %atomicrmw.global
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
-; CHECK-NEXT: global_atomic_add_f64 v2, v[0:1], s[4:5]
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
+; CHECK-NEXT: global_atomic_add_f64 v0, v[2:3], s[4:5]
 ; CHECK-NEXT: s_waitcnt vmcnt(0)
 ; CHECK-NEXT: buffer_wbinvl1_vol
 ; CHECK-NEXT: s_mov_b64 s[0:1], 0
@@ -165,9 +165,9 @@ define protected amdgpu_kernel void @InferPHI(i32 %a, ptr addrspace(1) %b, doubl
 ; CHECK-NEXT: .LBB3_10: ; %atomicrmw.shared
 ; CHECK-NEXT: s_cmp_lg_u64 s[4:5], 0
 ; CHECK-NEXT: s_cselect_b32 s0, s4, -1
-; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
-; CHECK-NEXT: ds_add_f64 v2, v[0:1]
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
+; CHECK-NEXT: ds_add_f64 v0, v[2:3]
 ; CHECK-NEXT: s_waitcnt lgkmcnt(0)
 ; CHECK-NEXT: s_endpgm
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
index b8f9571ccc2ee..e6960a3f710da 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -1072,14 +1072,14 @@ define amdgpu_kernel void @mad_i64_i32_uniform(ptr addrspace(1) %out, i32 %arg0,
 ; GFX9: ; %bb.0:
 ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
-; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: s_mul_hi_u32 s4, s2, s3
 ; GFX9-NEXT: s_mul_i32 s2, s2, s3
 ; GFX9-NEXT: s_add_u32 s2, s2, s6
 ; GFX9-NEXT: s_addc_u32 s3, s4, s7
-; GFX9-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
-; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
+; GFX9-NEXT: global_store_dwordx2 v0, v[2:3], s[0:1]
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX11-LABEL: mad_i64_i32_uniform:
diff --git a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
index f54a3831d43b4..73a474383becb 100644
--- a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
@@ -61,10 +61,10 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
 ; REGALLOC-GFX90A-NEXT: liveins: $sgpr4_sgpr5
 ; REGALLOC-GFX90A-NEXT: {{ $}}
 ; REGALLOC-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2162697 /* reguse:AGPR_32 */, undef %6:agpr_32
- ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6422538 /* regdef:VReg_128_Align2 */, def %24
- ; REGALLOC-GFX90A-NEXT: [[COPY:%[0-9]+]]:av_128_align2 = COPY %24
- ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3866634 /* regdef:VReg_64_Align2 */, def %22
- ; REGALLOC-GFX90A-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY %22
+ ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6422538 /* regdef:VReg_128_Align2 */, def %23
+ ; REGALLOC-GFX90A-NEXT: [[COPY:%[0-9]+]]:av_128_align2 = COPY %23
+ ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3866634 /* regdef:VReg_64_Align2 */, def %21
+ ; REGALLOC-GFX90A-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY %21
 ; REGALLOC-GFX90A-NEXT: GLOBAL_STORE_DWORDX4 undef %15:vreg_64_align2, [[COPY]], 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1)
 ; REGALLOC-GFX90A-NEXT: renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset1, addrspace 4)
 ; REGALLOC-GFX90A-NEXT: [[COPY2:%[0-9]+]]:areg_128_align2 = COPY killed renamable $sgpr0_sgpr1_sgpr2_sgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
index 546054cba4700..f5e136a80b4a8 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
@@ -209,10 +209,10 @@ define amdgpu_kernel void @incorrect_type_i64_block_count_x(ptr addrspace(1) inr
 ; GFX942-NEXT: ; %bb.2:
 ; GFX942-NEXT: .LBB5_0:
 ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; GFX942-NEXT: v_mov_b32_e32 v2, 0
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; GFX942-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[0:1]
+; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[2:3]
 ; GFX942-NEXT: s_endpgm
 ;
 ; GFX90a-LABEL: incorrect_type_i64_block_count_x:
@@ -224,10 +224,10 @@ define amdgpu_kernel void @incorrect_type_i64_block_count_x(ptr addrspace(1) inr
 ; GFX90a-NEXT: ; %bb.2:
 ; GFX90a-NEXT: .LBB5_0:
 ; GFX90a-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
-; GFX90a-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-NEXT: v_mov_b32_e32 v0, 0
 ; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
-; GFX90a-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX90a-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90a-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
 ; GFX90a-NEXT: s_endpgm
 ;
 ; GFX1250-LABEL: incorrect_type_i64_block_count_x:
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll b/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
index 58f0b9657476c..84aa948ac11b3 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
@@ -13,7 +13,8 @@ define amdgpu_kernel void @preload_ptr_kernarg_header(ptr inreg %arg) {
 ; ASM-NEXT: .p2align 8
 ; ASM-NEXT: .LBB0_0:
 ; ASM-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
-; ASM-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; ASM-NEXT: v_mov_b64_e32 v[2:3], s[8:9]
+; ASM-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
 ; ASM-NEXT: s_endpgm
 store ptr %arg, ptr %arg
 ret void
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
index be86fd18cd737..4d367ef7ffd9d 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
@@ -837,9 +837,9 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) inreg %out, i
 ; GFX942-NEXT: .p2align 8
 ; GFX942-NEXT: ; %bb.2:
 ; GFX942-NEXT: .LBB16_0:
-; GFX942-NEXT: v_mov_b32_e32 v2, 0
-; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; GFX942-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[4:5]
+; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[2:3]
 ; GFX942-NEXT: s_endpgm
 ;
 ; GFX90a-LABEL: i64_kernel_preload_arg:
@@ -850,9 +850,9 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) inreg %out, i
 ; GFX90a-NEXT: .p2align 8
 ; GFX90a-NEXT: ; %bb.2:
 ; GFX90a-NEXT: .LBB16_0:
-; GFX90a-NEXT: v_mov_b32_e32 v2, 0
-; GFX90a-NEXT: v_pk_mov_b32 v[0:1], s[10:11], s[10:11] op_sel:[0,1]
-; GFX90a-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX90a-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NEXT: v_pk_mov_b32 v[2:3], s[10:11], s[10:11] op_sel:[0,1]
+; GFX90a-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
 ; GFX90a-NEXT: s_endpgm
 ;
 ; GFX1250-LABEL: i64_kernel_preload_arg:
@@ -875,9 +875,9 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) inreg %out, d
 ; GFX942-NEXT: .p2align 8
 ; GFX942-NEXT: ; %bb.2:
 ; GFX942-NEXT: .LBB17_0:
-; GFX942-NEXT: v_mov_b32_e32 v2, 0
-; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; GFX942-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[4:5]
+; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[2:3]
 ; GFX942-NEXT: s_endpgm
 ;
 ; GFX90a-LABEL: f64_kernel_preload_arg:
@@ -888,9 +888,9 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) inreg %out, d
 ; GFX90a-NEXT: .p2align 8
 ; GFX90a-NEXT: ; %bb.2:
 ; GFX90a-NEXT: .LBB17_0:
-; GFX90a-NEXT: v_mov_b32_e32 v2, 0
-; GFX90a-NEXT: v_pk_mov_b32 v[0:1], s[10:11], s[10:11] op_sel:[0,1]
-; GFX90a-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX90a-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NEXT: v_pk_mov_b32 v[2:3], s[10:11], s[10:11] op_sel:[0,1]
+; GFX90a-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
 ; GFX90a-NEXT: s_endpgm
 ;
 ; GFX1250-LABEL: f64_kernel_preload_arg:
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
index 68ef30a90646e..4db232cbfa8c7 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
@@ -103,17 +103,16 @@ define void @baseptr_null(i64 %offset, i8 %v) {
 define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr) #0 {
 ; GFX942-LABEL: llvm_amdgcn_queue_ptr:
 ; GFX942: ; %bb.0:
-; GFX942-NEXT: v_mov_b32_e32 v2, 0
-; GFX942-NEXT: global_load_ubyte v0, v2, s[2:3] sc0 sc1
-; GFX942-NEXT: global_load_ubyte v0, v2, s[4:5] offset:8 sc0 sc1
-; GFX942-NEXT: global_load_ubyte v0, v2, s[0:1] sc0 sc1
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: global_load_ubyte v1, v0, s[2:3] sc0 sc1
+; GFX942-NEXT: global_load_ubyte v1, v0, s[4:5] offset:8 sc0 sc1
+; GFX942-NEXT: global_load_ubyte v1, v0, s[0:1] sc0 sc1
 ; GFX942-NEXT: ; kill: killed $sgpr0_sgpr1
 ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7]
 ; GFX942-NEXT: ; kill: killed $sgpr2_sgpr3
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
+; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[0:1] sc0 sc1
 ; GFX942-NEXT: s_waitcnt vmcnt(0)
 ; GFX942-NEXT: s_endpgm
 %queue.ptr = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
diff --git a/llvm/test/CodeGen/AMDGPU/store-to-constant.ll b/llvm/test/CodeGen/AMDGPU/store-to-constant.ll
index 64d5d01454a37..9b3b52012f327 100644
--- a/llvm/test/CodeGen/AMDGPU/store-to-constant.ll
+++ b/llvm/test/CodeGen/AMDGPU/store-to-constant.ll
@@ -134,10 +134,10 @@ define amdgpu_kernel void @store_as4_2xi32(ptr addrspace(4) %p, <2 x i32> %v) {
 ; CHECK-LABEL: store_as4_2xi32:
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
 ; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
-; CHECK-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
+; CHECK-NEXT: global_store_dwordx2 v0, v[2:3], s[0:1]
 ; CHECK-NEXT: s_endpgm
 store <2 x i32> %v, ptr addrspace(4) %p
 ret void
@@ -161,10 +161,10 @@ define amdgpu_kernel void @store_as4_2xfloat(ptr addrspace(4) %p, <2 x float> %v
 ; CHECK-LABEL: store_as4_2xfloat:
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
 ; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
-; CHECK-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
+; CHECK-NEXT: global_store_dwordx2 v0, v[2:3], s[0:1]
 ; CHECK-NEXT: s_endpgm
 store <2 x float> %v, ptr addrspace(4) %p
 ret void
@@ -175,11 +175,11 @@ define amdgpu_kernel void @store_as4_2xdouble(ptr addrspace(4) %p, <2 x double>
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x10
 ; CHECK-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
 ; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
-; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
-; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[4:5]
+; CHECK-NEXT: v_pk_mov_b32 v[4:5], s[2:3], s[2:3] op_sel:[0,1]
+; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
+; CHECK-NEXT: global_store_dwordx4 v0, v[2:5], s[4:5]
 ; CHECK-NEXT: s_endpgm
 store <2 x double> %v, ptr addrspace(4) %p
 ret void
diff --git a/llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll b/llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll
index f6c357dc38b48..d80ec6bd34945 100644
--- a/llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll
+++ b/llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll
@@ -83,6 +83,7 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %arg1.global, i1 %tmp3.i.i, i
 ; GLOBALNESS1-NEXT: s_mov_b32 s84, s14
 ; GLOBALNESS1-NEXT: s_mov_b64 s[34:35], s[10:11]
 ; GLOBALNESS1-NEXT: v_mov_b32_e32 v47, 0
+; GLOBALNESS1-NEXT: v_mov_b32_e32 v43, v42
 ; GLOBALNESS1-NEXT: s_mov_b32 s32, 0
 ; GLOBALNESS1-NEXT: ; implicit-def: $vgpr58_vgpr59
 ; GLOBALNESS1-NEXT: s_waitcnt vmcnt(0)
@@ -194,7 +195,6 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %arg1.global, i1 %tmp3.i.i, i
 ; GLOBALNESS1-NEXT: s_cbranch_vccnz .LBB1_13
 ; GLOBALNESS1-NEXT: ; %bb.12: ; %bb39.i
 ; GLOBALNESS1-NEXT: ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS1-NEXT: v_mov_b32_e32 v43, v42
 ; GLOBALNESS1-NEXT: global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS1-NEXT: .LBB1_13: ; %bb44.lr.ph.i
 ; GLOBALNESS1-NEXT: ; in Loop: Header=BB1_4 Depth=1
@@ -271,7 +271,6 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %arg1.global, i1 %tmp3.i.i, i
 ; GLOBALNESS1-NEXT: s_cbranch_execz .LBB1_14
 ; GLOBALNESS1-NEXT: ; %bb.23: ; %bb62.i
 ; GLOBALNESS1-NEXT: ; in Loop: Header=BB1_16 Depth=2
-; GLOBALNESS1-NEXT: v_mov_b32_e32 v43, v42
 ; GLOBALNESS1-NEXT: global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS1-NEXT: s_branch .LBB1_14
 ; GLOBALNESS1-NEXT: .LBB1_24: ; %Flow23
@@ -297,12 +296,10 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %arg1.global, i1 %tmp3.i.i, i
 ; GLOBALNESS1-NEXT: s_cbranch_vccnz .LBB1_1
 ; GLOBALNESS1-NEXT: ; %bb.27: ; %bb69.i
 ; GLOBALNESS1-NEXT: ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS1-NEXT: v_mov_b32_e32 v43, v42
 ; GLOBALNESS1-NEXT: global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS1-NEXT: s_branch .LBB1_1
 ; GLOBALNESS1-NEXT: .LBB1_28: ; %bb73.i
 ; GLOBALNESS1-NEXT: ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS1-NEXT: v_mov_b32_e32 v43, v42
 ; GLOBALNESS1-NEXT: global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS1-NEXT: s_branch .LBB1_2
 ; GLOBALNESS1-NEXT: .LBB1_29: ; %loop.exit.guard
@@ -397,6 +394,7 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %arg1.global, i1 %tmp3.i.i, i
 ; GLOBALNESS0-NEXT: s_mov_b32 s82, s14
 ; GLOBALNESS0-NEXT: s_mov_b64 s[34:35], s[10:11]
 ; GLOBALNESS0-NEXT: v_mov_b32_e32 v47, 0
+; GLOBALNESS0-NEXT: v_mov_b32_e32 v43, v42
 ; GLOBALNESS0-NEXT: s_mov_b32 s32, 0
 ; GLOBALNESS0-NEXT: ; implicit-def: $vgpr58_vgpr59
 ; GLOBALNESS0-NEXT: s_waitcnt vmcnt(0)
@@ -509,7 +507,6 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %arg1.global, i1 %tmp3.i.i, i
 ; GLOBALNESS0-NEXT: s_cbranch_vccnz .LBB1_13
 ; GLOBALNESS0-NEXT: ; %bb.12: ; %bb39.i
 ; GLOBALNESS0-NEXT: ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS0-NEXT: v_mov_b32_e32 v43, v42
 ; GLOBALNESS0-NEXT: global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS0-NEXT: .LBB1_13: ; %bb44.lr.ph.i
 ; GLOBALNESS0-NEXT: ; in Loop: Header=BB1_4 Depth=1
@@ -586,7 +583,6 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %arg1.global, i1 %tmp3.i.i, i
 ; GLOBALNESS0-NEXT: s_cbranch_execz .LBB1_14
 ; GLOBALNESS0-NEXT: ; %bb.23: ; %bb62.i
 ; GLOBALNESS0-NEXT: ; in Loop: Header=BB1_16 Depth=2
-; GLOBALNESS0-NEXT: v_mov_b32_e32 v43, v42
 ; GLOBALNESS0-NEXT: global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS0-NEXT: s_branch .LBB1_14
 ; GLOBALNESS0-NEXT: .LBB1_24: ; %Flow23
@@ -610,12 +606,10 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %arg1.global, i1 %tmp3.i.i, i
 ; GLOBALNESS0-NEXT: s_cbranch_vccnz .LBB1_1
 ; GLOBALNESS0-NEXT: ; %bb.27: ; %bb69.i
 ; GLOBALNESS0-NEXT: ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS0-NEXT: v_mov_b32_e32 v43, v42
 ; GLOBALNESS0-NEXT: global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS0-NEXT: s_branch .LBB1_1
 ; GLOBALNESS0-NEXT: .LBB1_28: ; %bb73.i
 ; GLOBALNESS0-NEXT: ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS0-NEXT: v_mov_b32_e32 v43, v42
 ; GLOBALNESS0-NEXT: global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS0-NEXT: s_branch .LBB1_2
 ; GLOBALNESS0-NEXT: .LBB1_29: ; %loop.exit.guard
diff --git a/llvm/test/CodeGen/AMDGPU/undef-handling-crash-in-ra.ll b/llvm/test/CodeGen/AMDGPU/undef-handling-crash-in-ra.ll
index b3166fa3f4548..89b90d7cd74f6 100644
--- a/llvm/test/CodeGen/AMDGPU/undef-handling-crash-in-ra.ll
+++ b/llvm/test/CodeGen/AMDGPU/undef-handling-crash-in-ra.ll
@@ -10,35 +10,31 @@ define amdgpu_kernel void @foo(ptr addrspace(5) %ptr5, ptr %p0, double %v0, <4 x
 ; CHECK-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
 ; CHECK-NEXT: v_pk_mov_b32 v[44:45], 0, 0
 ; CHECK-NEXT: flat_load_dword v42, v[44:45]
-; CHECK-NEXT: s_mov_b64 s[38:39], s[6:7]
-; CHECK-NEXT: s_mov_b64 s[48:49], s[4:5]
-; CHECK-NEXT: s_load_dwordx4 s[4:7], s[8:9], 0x8
-; CHECK-NEXT: s_load_dword s64, s[8:9], 0x0
+; CHECK-NEXT: s_load_dwordx4 s[64:67], s[8:9], 0x8
+; CHECK-NEXT: s_load_dword s68, s[8:9], 0x0
 ; CHECK-NEXT: s_add_u32 s0, s0, s17
 ; CHECK-NEXT: s_addc_u32 s1, s1, 0
 ; CHECK-NEXT: s_mov_b64 s[34:35], s[8:9]
+; CHECK-NEXT: s_mov_b64 s[48:49], s[4:5]
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
 ; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_accvgpr_write_b32 a32, s6
-; CHECK-NEXT: v_accvgpr_write_b32 a33, s7
-; CHECK-NEXT: s_mov_b64 s[6:7], src_private_base
-; CHECK-NEXT: s_cmp_lg_u32 s64, -1
-; CHECK-NEXT: s_cselect_b32 s7, s7, 0
-; CHECK-NEXT: s_cselect_b32 s8, s64, 0
+; CHECK-NEXT: s_cmp_lg_u32 s68, -1
+; CHECK-NEXT: s_cselect_b32 s4, s5, 0
+; CHECK-NEXT: s_cselect_b32 s5, s68, 0
 ; CHECK-NEXT: s_add_u32 s50, s34, 48
 ; CHECK-NEXT: s_addc_u32 s51, s35, 0
-; CHECK-NEXT: v_pk_mov_b32 v[56:57], s[4:5], s[4:5] op_sel:[0,1]
+; CHECK-NEXT: v_mov_b32_e32 v46, s5
+; CHECK-NEXT: v_mov_b32_e32 v47, s4
 ; CHECK-NEXT: s_getpc_b64 s[4:5]
 ; CHECK-NEXT: s_add_u32 s4, s4, G@gotpcrel32@lo+4
 ; CHECK-NEXT: s_addc_u32 s5, s5, G@gotpcrel32@hi+12
-; CHECK-NEXT: s_load_dwordx2 s[54:55], s[4:5], 0x0
-; CHECK-NEXT: s_mov_b32 s6, 0
-; CHECK-NEXT: v_mov_b32_e32 v47, s7
-; CHECK-NEXT: s_mov_b32 s7, s6
+; CHECK-NEXT: v_pk_mov_b32 v[56:57], s[64:65], s[64:65] op_sel:[0,1]
+; CHECK-NEXT: s_load_dwordx2 s[64:65], s[4:5], 0x0
+; CHECK-NEXT: s_mov_b32 s54, 0
+; CHECK-NEXT: s_mov_b32 s55, s54
 ; CHECK-NEXT: s_mov_b32 s53, s14
-; CHECK-NEXT: v_mov_b32_e32 v46, s8
-; CHECK-NEXT: v_pk_mov_b32 v[58:59], s[6:7], s[6:7] op_sel:[0,1]
+; CHECK-NEXT: v_pk_mov_b32 v[62:63], s[54:55], s[54:55] op_sel:[0,1]
 ; CHECK-NEXT: s_mov_b64 s[4:5], s[48:49]
-; CHECK-NEXT: s_mov_b64 s[6:7], s[38:39]
 ; CHECK-NEXT: s_mov_b64 s[8:9], s[50:51]
 ; CHECK-NEXT: s_mov_b32 s12, s14
 ; CHECK-NEXT: s_mov_b32 s13, s15
@@ -48,14 +44,17 @@ define amdgpu_kernel void @foo(ptr addrspace(5) %ptr5, ptr %p0, double %v0, <4 x
 ; CHECK-NEXT: s_mov_b32 s33, s16
 ; CHECK-NEXT: s_mov_b32 s52, s15
 ; CHECK-NEXT: s_mov_b64 s[36:37], s[10:11]
+; CHECK-NEXT: s_mov_b64 s[38:39], s[6:7]
 ; CHECK-NEXT: v_mov_b32_e32 v40, v0
-; CHECK-NEXT: flat_store_dwordx2 v[56:57], v[58:59]
+; CHECK-NEXT: v_mov_b32_e32 v60, s66
+; CHECK-NEXT: v_mov_b32_e32 v61, s67
+; CHECK-NEXT: flat_store_dwordx2 v[56:57], v[62:63]
 ; CHECK-NEXT: ; kill: def $sgpr15 killed $sgpr15
 ; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[54:55]
-; CHECK-NEXT: flat_load_dwordx2 v[60:61], v[56:57]
-; CHECK-NEXT: v_mov_b32_e32 v62, 0
-; CHECK-NEXT: v_mov_b32_e32 v63, 0x3ff00000
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[64:65]
+; CHECK-NEXT: flat_load_dwordx2 v[58:59], v[56:57]
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0x3ff00000
 ; CHECK-NEXT: s_mov_b64 s[4:5], s[48:49]
 ; CHECK-NEXT: s_mov_b64 s[6:7], s[38:39]
 ; CHECK-NEXT: s_mov_b64 s[8:9], s[50:51]
@@ -64,20 +63,22 @@ define amdgpu_kernel void @foo(ptr addrspace(5) %ptr5, ptr %p0, double %v0, <4 x
 ; CHECK-NEXT: s_mov_b32 s13, s52
 ; CHECK-NEXT: s_mov_b32 s14, s33
 ; CHECK-NEXT: v_mov_b32_e32 v31, v40
-; CHECK-NEXT: flat_store_dwordx2 v[44:45], v[62:63]
-; CHECK-NEXT: flat_store_dwordx2 v[56:57], v[58:59]
+; CHECK-NEXT: flat_store_dwordx2 v[44:45], v[0:1]
+; CHECK-NEXT: flat_store_dwordx2 v[56:57], v[62:63]
 ; CHECK-NEXT: s_waitcnt vmcnt(0)
 ; CHECK-NEXT: ; kill: def $sgpr15 killed $sgpr15
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[54:55]
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[64:65]
 ; CHECK-NEXT: flat_load_dwordx2 v[0:1], v[46:47] glc
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v0, s64
-; CHECK-NEXT: v_cmp_lt_i32_e32 vcc, 0, v42
-; CHECK-NEXT: flat_store_dwordx2 v[56:57], v[60:61]
+; CHECK-NEXT: v_mov_b32_e32 v1, s67
+; CHECK-NEXT: v_mov_b32_e32 v0, s68
+; CHECK-NEXT: flat_store_dwordx2 v[56:57], v[58:59]
 ; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_dwordx2 v[56:57], a[32:33]
-; CHECK-NEXT: buffer_store_dword a33, v0, s[0:3], 0 offen offset:4
-; CHECK-NEXT: buffer_store_dword v62, v0, s[0:3], 0 offen
+; CHECK-NEXT: flat_store_dwordx2 v[56:57], v[60:61]
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
+; CHECK-NEXT: v_mov_b32_e32 v1, s54
+; CHECK-NEXT: v_cmp_lt_i32_e32 vcc, 0, v42
+; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
 ; CHECK-NEXT: ; implicit-def: $vgpr4
 ; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
 ; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll b/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
index fe7def8a69278..11d724eda547e 100644
--- a/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
@@ -2190,13 +2190,13 @@ define amdgpu_kernel void @shuffle_scalar_load_v8i32_0123(ptr addrspace(4) %in,
 ; GFX942-LABEL: shuffle_scalar_load_v8i32_0123:
 ; GFX942: ; %bb.0:
 ; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX942-NEXT: v_mov_b32_e32 v4, 0
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x0
 ; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[6:7]
-; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3]
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], s[4:5]
+; GFX942-NEXT: v_mov_b64_e32 v[4:5], s[6:7]
+; GFX942-NEXT: global_store_dwordx4 v0, v[2:5], s[2:3]
 ; GFX942-NEXT: s_endpgm
 ;
 ; GFX10-LABEL: shuffle_scalar_load_v8i32_0123: