Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 19 additions & 5 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,10 +47,10 @@ namespace llvm::AMDGPU {
#include "AMDGPUGenSearchableTables.inc"
} // namespace llvm::AMDGPU

static cl::opt<bool> DisableDiffBasePtrMemClustering(
"amdgpu-disable-diff-baseptr-mem-clustering",
cl::desc("Disable clustering memory ops with different base pointers"),
cl::init(false), cl::Hidden);
// Hidden command-line escape hatch, enabled by default: when set, the
// scheduler may cluster memory operations even if their base pointers
// differ (subject to the same-address-space check applied at the use site).
// Pass -amdgpu-enable-diff-baseptr-mem-clustering=false to restore the
// old behavior of clustering only same-base-pointer memory ops.
static cl::opt<bool> EnableDiffBasePtrMemClustering(
"amdgpu-enable-diff-baseptr-mem-clustering",
cl::desc("Enable clustering memory ops with different base pointers"),
cl::init(true), cl::Hidden);

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests for
Expand Down Expand Up @@ -585,10 +585,24 @@ bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();

if (!DisableDiffBasePtrMemClustering) {
if (EnableDiffBasePtrMemClustering) {
// Only consider memory ops from same addrspace for clustering
if (!memOpsHaveSameAddrspace(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
return false;

// Don't cluster scalar and vector memory ops
const MachineFunction &MF = *FirstLdSt.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
if (FirstLdSt.getOperand(0).isReg() &&
SecondLdSt.getOperand(0).isReg()) {
bool isFirstVecReg = RI.isVectorRegister(MRI,
FirstLdSt.getOperand(0).getReg());
bool isSecondVecReg = RI.isVectorRegister(MRI,
SecondLdSt.getOperand(0).getReg());
if ((isFirstVecReg && !isSecondVecReg) ||
(!isFirstVecReg && isSecondVecReg))
return false;
}
} else {
// If the mem ops (to be clustered) do not have the same base ptr, then
// they should not be clustered
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -288,16 +288,16 @@ define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr) #0 {
; GFX8V4-NEXT: v_mov_b32_e32 v0, s0
; GFX8V4-NEXT: v_mov_b32_e32 v1, s1
; GFX8V4-NEXT: flat_load_ubyte v0, v[0:1] glc
; GFX8V4-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GFX8V4-NEXT: s_waitcnt vmcnt(0)
; GFX8V4-NEXT: v_mov_b32_e32 v0, s4
; GFX8V4-NEXT: v_mov_b32_e32 v1, s5
; GFX8V4-NEXT: flat_load_ubyte v0, v[0:1] glc
; GFX8V4-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GFX8V4-NEXT: s_waitcnt vmcnt(0)
; GFX8V4-NEXT: v_mov_b32_e32 v0, s10
; GFX8V4-NEXT: v_mov_b32_e32 v1, s11
; GFX8V4-NEXT: s_waitcnt lgkmcnt(0)
; GFX8V4-NEXT: v_mov_b32_e32 v3, s1
; GFX8V4-NEXT: v_mov_b32_e32 v1, s11
; GFX8V4-NEXT: v_mov_b32_e32 v2, s0
; GFX8V4-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8V4-NEXT: s_waitcnt vmcnt(0)
Expand All @@ -314,16 +314,16 @@ define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr) #0 {
; GFX8V5-NEXT: v_mov_b32_e32 v0, s0
; GFX8V5-NEXT: v_mov_b32_e32 v1, s1
; GFX8V5-NEXT: flat_load_ubyte v0, v[0:1] glc
; GFX8V5-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GFX8V5-NEXT: s_waitcnt vmcnt(0)
; GFX8V5-NEXT: v_mov_b32_e32 v0, s4
; GFX8V5-NEXT: v_mov_b32_e32 v1, s5
; GFX8V5-NEXT: flat_load_ubyte v0, v[0:1] glc
; GFX8V5-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GFX8V5-NEXT: s_waitcnt vmcnt(0)
; GFX8V5-NEXT: v_mov_b32_e32 v0, s10
; GFX8V5-NEXT: v_mov_b32_e32 v1, s11
; GFX8V5-NEXT: s_waitcnt lgkmcnt(0)
; GFX8V5-NEXT: v_mov_b32_e32 v3, s1
; GFX8V5-NEXT: v_mov_b32_e32 v1, s11
; GFX8V5-NEXT: v_mov_b32_e32 v2, s0
; GFX8V5-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8V5-NEXT: s_waitcnt vmcnt(0)
Expand Down
50 changes: 25 additions & 25 deletions llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
Original file line number Diff line number Diff line change
Expand Up @@ -513,16 +513,16 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX908-LABEL: introduced_copy_to_sgpr:
; GFX908: ; %bb.0: ; %bb
; GFX908-NEXT: global_load_ushort v16, v[0:1], off glc
; GFX908-NEXT: s_load_dword s0, s[8:9], 0x18
; GFX908-NEXT: s_load_dwordx4 s[4:7], s[8:9], 0x0
; GFX908-NEXT: s_load_dwordx2 s[10:11], s[8:9], 0x10
; GFX908-NEXT: s_mov_b32 s8, 0
; GFX908-NEXT: s_mov_b32 s13, s8
; GFX908-NEXT: v_mov_b32_e32 v19, 0
; GFX908-NEXT: s_load_dword s0, s[8:9], 0x18
; GFX908-NEXT: s_mov_b32 s12, 0
; GFX908-NEXT: s_mov_b32 s9, s12
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
; GFX908-NEXT: v_cvt_f32_u32_e32 v0, s7
; GFX908-NEXT: s_sub_i32 s1, 0, s7
; GFX908-NEXT: v_cvt_f32_f16_e32 v17, s0
; GFX908-NEXT: v_mov_b32_e32 v19, 0
; GFX908-NEXT: v_rcp_iflag_f32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, 0
; GFX908-NEXT: v_mov_b32_e32 v1, 0
Expand All @@ -542,14 +542,14 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX908-NEXT: s_cselect_b32 s2, s6, s2
; GFX908-NEXT: s_add_i32 s3, s1, 1
; GFX908-NEXT: s_cmp_ge_u32 s2, s7
; GFX908-NEXT: s_cselect_b32 s12, s3, s1
; GFX908-NEXT: s_cselect_b32 s8, s3, s1
; GFX908-NEXT: s_lshr_b32 s2, s0, 16
; GFX908-NEXT: v_cvt_f32_f16_e32 v18, s2
; GFX908-NEXT: s_lshl_b64 s[6:7], s[4:5], 5
; GFX908-NEXT: s_lshl_b64 s[14:15], s[10:11], 5
; GFX908-NEXT: s_and_b64 s[0:1], exec, s[0:1]
; GFX908-NEXT: s_or_b32 s14, s14, 28
; GFX908-NEXT: s_lshl_b64 s[16:17], s[12:13], 5
; GFX908-NEXT: s_lshl_b64 s[16:17], s[8:9], 5
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_readfirstlane_b32 s2, v16
; GFX908-NEXT: s_and_b32 s2, 0xffff, s2
Expand All @@ -573,15 +573,15 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX908-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX908-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
; GFX908-NEXT: v_cmp_gt_i64_e64 s[2:3], s[10:11], -1
; GFX908-NEXT: s_mov_b32 s9, s8
; GFX908-NEXT: s_mov_b32 s13, s12
; GFX908-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3]
; GFX908-NEXT: v_mov_b32_e32 v4, s8
; GFX908-NEXT: v_mov_b32_e32 v4, s12
; GFX908-NEXT: v_cmp_ne_u32_e64 s[2:3], 1, v6
; GFX908-NEXT: v_mov_b32_e32 v6, s8
; GFX908-NEXT: v_mov_b32_e32 v8, s8
; GFX908-NEXT: v_mov_b32_e32 v5, s9
; GFX908-NEXT: v_mov_b32_e32 v7, s9
; GFX908-NEXT: v_mov_b32_e32 v9, s9
; GFX908-NEXT: v_mov_b32_e32 v6, s12
; GFX908-NEXT: v_mov_b32_e32 v8, s12
; GFX908-NEXT: v_mov_b32_e32 v5, s13
; GFX908-NEXT: v_mov_b32_e32 v7, s13
; GFX908-NEXT: v_mov_b32_e32 v9, s13
; GFX908-NEXT: v_cmp_lt_i64_e64 s[18:19], s[10:11], 0
; GFX908-NEXT: v_mov_b32_e32 v11, v5
; GFX908-NEXT: s_mov_b64 s[20:21], s[14:15]
Expand Down Expand Up @@ -667,7 +667,7 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX908-NEXT: s_cbranch_vccz .LBB3_1
; GFX908-NEXT: ; %bb.11: ; %bb12
; GFX908-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX908-NEXT: s_add_u32 s10, s10, s12
; GFX908-NEXT: s_add_u32 s10, s10, s8
; GFX908-NEXT: s_addc_u32 s11, s11, 0
; GFX908-NEXT: s_add_u32 s14, s14, s16
; GFX908-NEXT: s_addc_u32 s15, s15, s17
Expand All @@ -679,15 +679,15 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX90A-LABEL: introduced_copy_to_sgpr:
; GFX90A: ; %bb.0: ; %bb
; GFX90A-NEXT: global_load_ushort v18, v[0:1], off glc
; GFX90A-NEXT: s_load_dword s0, s[8:9], 0x18
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[8:9], 0x0
; GFX90A-NEXT: s_load_dwordx2 s[10:11], s[8:9], 0x10
; GFX90A-NEXT: s_mov_b32 s8, 0
; GFX90A-NEXT: s_mov_b32 s13, s8
; GFX90A-NEXT: v_mov_b32_e32 v19, 0
; GFX90A-NEXT: s_load_dword s0, s[8:9], 0x18
; GFX90A-NEXT: s_mov_b32 s12, 0
; GFX90A-NEXT: s_mov_b32 s9, s12
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_cvt_f32_u32_e32 v0, s7
; GFX90A-NEXT: s_sub_i32 s1, 0, s7
; GFX90A-NEXT: v_mov_b32_e32 v19, 0
; GFX90A-NEXT: v_rcp_iflag_f32_e32 v2, v0
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], 0, 0
; GFX90A-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
Expand All @@ -707,14 +707,14 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX90A-NEXT: s_cselect_b32 s2, s6, s2
; GFX90A-NEXT: s_add_i32 s3, s1, 1
; GFX90A-NEXT: s_cmp_ge_u32 s2, s7
; GFX90A-NEXT: s_cselect_b32 s12, s3, s1
; GFX90A-NEXT: s_cselect_b32 s8, s3, s1
; GFX90A-NEXT: s_lshr_b32 s2, s0, 16
; GFX90A-NEXT: v_cvt_f32_f16_e32 v3, s2
; GFX90A-NEXT: s_lshl_b64 s[6:7], s[4:5], 5
; GFX90A-NEXT: s_lshl_b64 s[14:15], s[10:11], 5
; GFX90A-NEXT: s_and_b64 s[0:1], exec, s[0:1]
; GFX90A-NEXT: s_or_b32 s14, s14, 28
; GFX90A-NEXT: s_lshl_b64 s[16:17], s[12:13], 5
; GFX90A-NEXT: s_lshl_b64 s[16:17], s[8:9], 5
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_readfirstlane_b32 s2, v18
; GFX90A-NEXT: s_and_b32 s2, 0xffff, s2
Expand All @@ -738,12 +738,12 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX90A-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
; GFX90A-NEXT: v_cmp_gt_i64_e64 s[2:3], s[10:11], -1
; GFX90A-NEXT: s_mov_b32 s9, s8
; GFX90A-NEXT: s_mov_b32 s13, s12
; GFX90A-NEXT: v_cndmask_b32_e64 v8, 0, 1, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], s[8:9], s[8:9] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], s[12:13], s[12:13] op_sel:[0,1]
; GFX90A-NEXT: v_cmp_ne_u32_e64 s[2:3], 1, v8
; GFX90A-NEXT: v_pk_mov_b32 v[8:9], s[8:9], s[8:9] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[10:11], s[8:9], s[8:9] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[8:9], s[12:13], s[12:13] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[10:11], s[12:13], s[12:13] op_sel:[0,1]
; GFX90A-NEXT: v_cmp_lt_i64_e64 s[18:19], s[10:11], 0
; GFX90A-NEXT: s_mov_b64 s[20:21], s[14:15]
; GFX90A-NEXT: v_pk_mov_b32 v[12:13], v[6:7], v[6:7] op_sel:[0,1]
Expand Down Expand Up @@ -821,7 +821,7 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX90A-NEXT: s_cbranch_vccz .LBB3_1
; GFX90A-NEXT: ; %bb.11: ; %bb12
; GFX90A-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX90A-NEXT: s_add_u32 s10, s10, s12
; GFX90A-NEXT: s_add_u32 s10, s10, s8
; GFX90A-NEXT: s_addc_u32 s11, s11, 0
; GFX90A-NEXT: s_add_u32 s14, s14, s16
; GFX90A-NEXT: s_addc_u32 s15, s15, s17
Expand Down
Loading
Loading