Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5296,6 +5296,30 @@ SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
}

bool AMDGPUTargetLowering::isInt64ImmLegal(SDNode *N, SelectionDAG &DAG) const {
  // v_mov_b64 only exists on GCN subtargets; bail out immediately elsewhere.
  if (!Subtarget->isGCN())
    return false;

  const GCNSubtarget &ST = DAG.getSubtarget<GCNSubtarget>();
  if (!ST.hasMovB64())
    return false;

  // Extract the raw 64-bit payload from either an integer or an FP constant
  // node; any other node kind cannot be materialized by v_mov_b64.
  APInt Bits;
  if (const auto *CI = dyn_cast<ConstantSDNode>(N))
    Bits = CI->getAPIntValue();
  else if (const auto *CF = dyn_cast<ConstantFPSDNode>(N))
    Bits = CF->getValueAPF().bitcastToAPInt();
  else
    return false;

  // Targets with 64-bit literal support can encode any immediate directly.
  if (ST.has64BitLiterals())
    return true;

  // Otherwise the constant must either fit in a 32-bit literal (upper half
  // zero) or be one of the hardware inline constants.
  const auto *TII = ST.getInstrInfo();
  return isUInt<32>(Bits.getZExtValue()) || TII->isInlineConstant(Bits);
}

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
Expand Down Expand Up @@ -5345,6 +5369,8 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Src = N->getOperand(0);
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
SDLoc SL(N);
if (isInt64ImmLegal(C, DAG))
break;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you track down the existing tests for this combine, and add a gfx942 run line? I'm sure it's missing. Most of this code probably hasn't been revisited

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The original patch didn't add any tests and only affected about 4 existing ones. I checked locally which tests are affected by removing this combine altogether, and spawned #154363 adding gfx942 run lines for them (I omitted a number of tests from the gfx942 enabling that appeared to be bug/crash reproducers rather than actual codegen tests).

uint64_t CVal = C->getZExtValue();
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
Expand All @@ -5355,6 +5381,8 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
const APInt &Val = C->getValueAPF().bitcastToAPInt();
SDLoc SL(N);
if (isInt64ImmLegal(C, DAG))
break;
uint64_t CVal = Val.getZExtValue();
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
Expand Down
3 changes: 3 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,9 @@ class AMDGPUTargetLowering : public TargetLowering {
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

protected:
/// Check whether value Val can be supported by v_mov_b64, for the current
/// target.
bool isInt64ImmLegal(SDNode *Val, SelectionDAG &DAG) const;
bool shouldCombineMemoryType(EVT VT) const;
SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
Expand Down
25 changes: 24 additions & 1 deletion llvm/lib/Target/AMDGPU/SIISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14584,13 +14584,36 @@ SITargetLowering::performExtractVectorEltCombine(SDNode *N,
return V;
}

// EXTRACT_VECTOR_ELT (v2i32 bitcast (i64/f64:k), Idx)
// =>
// i32:Lo(k) if Idx == 0, or
// i32:Hi(k) if Idx == 1
auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (Vec.getOpcode() == ISD::BITCAST && VecVT == MVT::v2i32 && Idx) {
SDLoc SL(N);
SDValue PeekThrough = Vec.getOperand(0);
auto *KImm = dyn_cast<ConstantSDNode>(PeekThrough);
if (KImm && KImm->getValueType(0).getSizeInBits() == 64) {
uint64_t KImmValue = KImm->getZExtValue();
return DAG.getConstant(
(KImmValue >> (32 * Idx->getZExtValue())) & 0xffffffff, SL, MVT::i32);
}
auto *KFPImm = dyn_cast<ConstantFPSDNode>(PeekThrough);
if (KFPImm && KFPImm->getValueType(0).getSizeInBits() == 64) {
uint64_t KFPImmValue =
KFPImm->getValueAPF().bitcastToAPInt().getZExtValue();
return DAG.getConstant((KFPImmValue >> (32 * Idx->getZExtValue())) &
0xffffffff,
SL, MVT::i32);
}
}

if (!DCI.isBeforeLegalize())
return SDValue();

// Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
// elements. This exposes more load reduction opportunities by replacing
// multiple small extract_vector_elements with a single 32-bit extract.
auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (isa<MemSDNode>(Vec) && VecEltSize <= 16 && VecEltVT.isByteSized() &&
VecSize > 32 && VecSize % 32 == 0 && Idx) {
EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
Expand Down
57 changes: 29 additions & 28 deletions llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
Original file line number Diff line number Diff line change
Expand Up @@ -7,21 +7,18 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: s_load_dword s0, s[4:5], 0x8
; CHECK-NEXT: s_load_dwordx2 s[10:11], s[4:5], 0x0
; CHECK-NEXT: s_load_dwordx4 s[12:15], s[4:5], 0x10
; CHECK-NEXT: v_mov_b32_e32 v1, 0x3e21eeb6
; CHECK-NEXT: v_mov_b32_e32 v20, 0
; CHECK-NEXT: v_mov_b32_e32 v30, 0x9037ab78
; CHECK-NEXT: v_mov_b32_e32 v31, 0x3e21eeb6
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_bitcmp1_b32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[16:17], -1, 0
; CHECK-NEXT: s_xor_b64 s[18:19], s[16:17], -1
; CHECK-NEXT: s_bitcmp1_b32 s0, 8
; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3]
; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; CHECK-NEXT: v_mov_b32_e32 v0, 0x9037ab78
; CHECK-NEXT: v_accvgpr_write_b32 a3, v1
; CHECK-NEXT: s_xor_b64 s[20:21], s[2:3], -1
; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; CHECK-NEXT: s_and_b64 s[2:3], exec, s[2:3]
; CHECK-NEXT: v_accvgpr_write_b32 a2, v0
; CHECK-NEXT: v_mov_b32_e32 v2, 0xa17f65f6
; CHECK-NEXT: v_mov_b32_e32 v3, 0xbe927e4f
; CHECK-NEXT: v_mov_b32_e32 v4, 0x19f4ec90
Expand All @@ -37,14 +34,15 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: v_mov_b32_e32 v14, 0x8427b883
; CHECK-NEXT: v_mov_b32_e32 v15, 0x3fae1bb4
; CHECK-NEXT: s_mov_b64 s[22:23], 0
; CHECK-NEXT: v_mov_b32_e32 v0, 0x57b87036
; CHECK-NEXT: v_mov_b32_e32 v1, 0x3fb3b136
; CHECK-NEXT: v_mov_b32_e32 v20, 0x57b87036
; CHECK-NEXT: v_mov_b32_e32 v21, 0x3fb3b136
; CHECK-NEXT: s_and_b64 s[4:5], exec, s[16:17]
; CHECK-NEXT: v_mov_b32_e32 v18, 0x55555523
; CHECK-NEXT: v_mov_b32_e32 v19, 0xbfd55555
; CHECK-NEXT: s_and_b64 s[6:7], exec, s[18:19]
; CHECK-NEXT: v_mov_b32_e32 v21, v20
; CHECK-NEXT: ; implicit-def: $vgpr30_vgpr31
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b64_e32 v[16:17], 0
; CHECK-NEXT: ; implicit-def: $agpr0_agpr1
; CHECK-NEXT: ; implicit-def: $vgpr22_vgpr23
; CHECK-NEXT: s_branch .LBB0_2
; CHECK-NEXT: .LBB0_1: ; %Flow9
Expand All @@ -64,12 +62,11 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[14:15]
; CHECK-NEXT: flat_load_dwordx2 v[24:25], v[24:25]
; CHECK-NEXT: v_accvgpr_read_b32 v27, a3
; CHECK-NEXT: v_accvgpr_read_b32 v26, a2
; CHECK-NEXT: v_mov_b64_e32 v[26:27], v[30:31]
; CHECK-NEXT: v_mov_b64_e32 v[28:29], v[2:3]
; CHECK-NEXT: v_mov_b64_e32 v[16:17], v[0:1]
; CHECK-NEXT: v_accvgpr_write_b32 a0, 0
; CHECK-NEXT: v_accvgpr_write_b32 a1, 0
; CHECK-NEXT: v_mov_b64_e32 v[16:17], v[20:21]
; CHECK-NEXT: v_accvgpr_write_b32 a2, 0
; CHECK-NEXT: v_accvgpr_write_b32 a3, 0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[24:25]
; CHECK-NEXT: v_fmac_f64_e32 v[28:29], 0, v[26:27]
Expand All @@ -96,30 +93,32 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: .LBB0_6: ; %.preheader1855.i.i.i3329
; CHECK-NEXT: ; Parent Loop BB0_2 Depth=1
; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
; CHECK-NEXT: v_accvgpr_read_b32 v29, a1
; CHECK-NEXT: v_accvgpr_read_b32 v28, a0
; CHECK-NEXT: v_accvgpr_read_b32 v29, a3
; CHECK-NEXT: v_accvgpr_read_b32 v28, a2
; CHECK-NEXT: s_mov_b64 s[24:25], -1
; CHECK-NEXT: s_mov_b64 s[8:9], -1
; CHECK-NEXT: s_mov_b64 vcc, s[2:3]
; CHECK-NEXT: ; implicit-def: $agpr0_agpr1
; CHECK-NEXT: ; implicit-def: $agpr2_agpr3
; CHECK-NEXT: s_cbranch_vccz .LBB0_5
; CHECK-NEXT: ; %bb.7: ; %.lr.ph2070.i.i.i3291
; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2
; CHECK-NEXT: v_accvgpr_write_b32 a0, v30
; CHECK-NEXT: v_accvgpr_write_b32 a1, v31
; CHECK-NEXT: v_accvgpr_mov_b32 a3, a1
; CHECK-NEXT: v_accvgpr_mov_b32 a2, a0
; CHECK-NEXT: s_mov_b64 s[8:9], s[18:19]
; CHECK-NEXT: s_mov_b64 vcc, s[6:7]
; CHECK-NEXT: s_cbranch_vccz .LBB0_5
; CHECK-NEXT: ; %bb.8: ; %.preheader1856.preheader.i.i.i3325
; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2
; CHECK-NEXT: v_accvgpr_write_b32 a0, v26
; CHECK-NEXT: v_accvgpr_write_b32 a2, v26
; CHECK-NEXT: s_mov_b64 s[24:25], 0
; CHECK-NEXT: v_accvgpr_write_b32 a1, v27
; CHECK-NEXT: v_accvgpr_write_b32 a3, v27
; CHECK-NEXT: s_mov_b64 s[8:9], 0
; CHECK-NEXT: s_branch .LBB0_5
; CHECK-NEXT: .LBB0_9: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[10:11]
; CHECK-NEXT: v_accvgpr_write_b32 a0, v24
; CHECK-NEXT: s_mov_b64 s[22:23], 0
; CHECK-NEXT: v_mov_b64_e32 v[30:31], s[10:11]
; CHECK-NEXT: v_accvgpr_write_b32 a1, v25
; CHECK-NEXT: s_mov_b64 s[8:9], s[20:21]
; CHECK-NEXT: s_branch .LBB0_15
; CHECK-NEXT: .LBB0_10: ; in Loop: Header=BB0_2 Depth=1
Expand All @@ -136,19 +135,21 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: v_cndmask_b32_e64 v23, v23, 0, s[16:17]
; CHECK-NEXT: v_cndmask_b32_e64 v22, v22, 0, s[16:17]
; CHECK-NEXT: v_cndmask_b32_e64 v16, 0, 1, s[8:9]
; CHECK-NEXT: v_mov_b32_e32 v17, v16
; CHECK-NEXT: s_and_b64 s[8:9], exec, s[16:17]
; CHECK-NEXT: global_store_dwordx2 v20, v[16:17], s[12:13]
; CHECK-NEXT: v_mov_b32_e32 v17, v16
; CHECK-NEXT: s_cselect_b32 s23, s23, 0
; CHECK-NEXT: s_cselect_b32 s22, s22, 0
; CHECK-NEXT: s_mov_b64 s[8:9], -1
; CHECK-NEXT: global_store_dwordx2 v0, v[16:17], s[12:13]
; CHECK-NEXT: s_branch .LBB0_14
; CHECK-NEXT: .LBB0_13: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: s_mov_b64 s[8:9], 0
; CHECK-NEXT: v_mov_b64_e32 v[22:23], 0
; CHECK-NEXT: .LBB0_14: ; %Flow6
; CHECK-NEXT: .LBB0_14: ; %Flow8
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: v_mov_b64_e32 v[30:31], v[24:25]
; CHECK-NEXT: v_accvgpr_write_b32 a0, v24
; CHECK-NEXT: v_mov_b64_e32 v[16:17], 0
; CHECK-NEXT: v_accvgpr_write_b32 a1, v25
Comment on lines +150 to +152
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This looks worse, we now end up with more movs inside a loop

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good point, didn't think about the loop in this test. Looking into this.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This doesn't seem to be necessarily caused by this patch directly but a knock-on effect of a further constrained register with a kernel that requires agpr spilling.
After isel:

  %149:sreg_32 = S_MOV_B32 0
  %150:sreg_64 = REG_SEQUENCE %149:sreg_32, %subreg.sub0, %149:sreg_32, %subreg.sub1
  %152:av_64_align2 = COPY %150:sreg_64

Becomes:

  %150:vreg_64_align2 = V_MOV_B64_PSEUDO 0, implicit $exec

which is expected for the patch but does limit the possible agpr-spillable instructions.

The conversion seen here is the same as what previously occurred in bb4, which is at the same loop depth. Should it always emit the av_mov_b64_* pseudo instead of the v_mov_b64_* pseudo?

; CHECK-NEXT: .LBB0_15: ; %Flow6
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: s_mov_b64 s[24:25], -1
Expand All @@ -157,7 +158,7 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: ; %bb.16: ; %._crit_edge2105.i.i.i2330
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: s_mov_b64 s[24:25], 0
; CHECK-NEXT: global_store_dwordx2 v20, v[20:21], s[12:13]
; CHECK-NEXT: global_store_dwordx2 v0, v[16:17], s[12:13]
; CHECK-NEXT: s_branch .LBB0_1
; CHECK-NEXT: .LBB0_17: ; %DummyReturnBlock
; CHECK-NEXT: s_endpgm
Expand Down
6 changes: 2 additions & 4 deletions llvm/test/CodeGen/AMDGPU/flat-scratch.ll
Original file line number Diff line number Diff line change
Expand Up @@ -4158,8 +4158,7 @@ define void @store_load_i64_aligned(ptr addrspace(5) nocapture %arg) {
; GFX942-LABEL: store_load_i64_aligned:
; GFX942: ; %bb.0: ; %bb
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v2, 15
; GFX942-NEXT: v_mov_b32_e32 v3, 0
; GFX942-NEXT: v_mov_b64_e32 v[2:3], 15
; GFX942-NEXT: scratch_store_dwordx2 v0, v[2:3], off sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: scratch_load_dwordx2 v[0:1], v0, off sc0 sc1
Expand Down Expand Up @@ -4269,8 +4268,7 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX942-LABEL: store_load_i64_unaligned:
; GFX942: ; %bb.0: ; %bb
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v2, 15
; GFX942-NEXT: v_mov_b32_e32 v3, 0
; GFX942-NEXT: v_mov_b64_e32 v[2:3], 15
; GFX942-NEXT: scratch_store_dwordx2 v0, v[2:3], off sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: scratch_load_dwordx2 v[0:1], v0, off sc0 sc1
Expand Down
39 changes: 13 additions & 26 deletions llvm/test/CodeGen/AMDGPU/imm.ll
Original file line number Diff line number Diff line change
Expand Up @@ -1969,10 +1969,9 @@ define amdgpu_kernel void @add_inline_imm_neg_1_f64(ptr addrspace(1) %out, [8 x
; GFX942-LABEL: add_inline_imm_neg_1_f64:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: v_mov_b32_e32 v0, -1
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b64_e32 v[0:1], -1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2009,8 +2008,7 @@ define amdgpu_kernel void @add_inline_imm_neg_2_f64(ptr addrspace(1) %out, [8 x
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, -2
; GFX942-NEXT: v_mov_b32_e32 v1, -1
; GFX942-NEXT: v_mov_b64_e32 v[0:1], -2
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2047,8 +2045,7 @@ define amdgpu_kernel void @add_inline_imm_neg_16_f64(ptr addrspace(1) %out, [8 x
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, -16
; GFX942-NEXT: v_mov_b32_e32 v1, -1
; GFX942-NEXT: v_mov_b64_e32 v[0:1], -16
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2163,10 +2160,9 @@ define amdgpu_kernel void @store_inline_imm_0.0_f64(ptr addrspace(1) %out) {
; GFX942-LABEL: store_inline_imm_0.0_f64:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b64_e32 v[0:1], 0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2239,8 +2235,7 @@ define amdgpu_kernel void @store_inline_imm_0.5_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, 0x3fe00000
; GFX942-NEXT: v_mov_b64_e32 v[0:1], 0.5
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2276,8 +2271,7 @@ define amdgpu_kernel void @store_inline_imm_m_0.5_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, 0xbfe00000
; GFX942-NEXT: v_mov_b64_e32 v[0:1], -0.5
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2313,8 +2307,7 @@ define amdgpu_kernel void @store_inline_imm_1.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, 0x3ff00000
; GFX942-NEXT: v_mov_b64_e32 v[0:1], 1.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2350,8 +2343,7 @@ define amdgpu_kernel void @store_inline_imm_m_1.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, 0xbff00000
; GFX942-NEXT: v_mov_b64_e32 v[0:1], -1.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2387,8 +2379,7 @@ define amdgpu_kernel void @store_inline_imm_2.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, 2.0
; GFX942-NEXT: v_mov_b64_e32 v[0:1], 2.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2424,8 +2415,7 @@ define amdgpu_kernel void @store_inline_imm_m_2.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, -2.0
; GFX942-NEXT: v_mov_b64_e32 v[0:1], -2.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2461,8 +2451,7 @@ define amdgpu_kernel void @store_inline_imm_4.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, 0x40100000
; GFX942-NEXT: v_mov_b64_e32 v[0:1], 4.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2498,8 +2487,7 @@ define amdgpu_kernel void @store_inline_imm_m_4.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: v_mov_b32_e32 v1, 0xc0100000
; GFX942-NEXT: v_mov_b64_e32 v[0:1], -4.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down Expand Up @@ -2535,8 +2523,7 @@ define amdgpu_kernel void @store_inv_2pi_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
; GFX942-NEXT: v_mov_b32_e32 v0, 0x6dc9c882
; GFX942-NEXT: v_mov_b32_e32 v1, 0x3fc45f30
; GFX942-NEXT: v_mov_b64_e32 v[0:1], 0.15915494309189532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
Expand Down