Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions llvm/lib/Analysis/MemorySSA.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,17 @@ static bool areLoadsReorderable(const LoadInst *Use,
return !(SeqCstUse || MayClobberIsAcquire);
}

bool hasInaccessibleMemoryClobber(const CallBase *CallFirst,
const CallBase *CallSecond) {

MemoryEffects ME1 = CallFirst->getMemoryEffects();
MemoryEffects ME2 = CallSecond->getMemoryEffects();
if (CallFirst->onlyAccessesInaccessibleMemory() ||
CallSecond->onlyAccessesInaccessibleMemory())
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Isn't onlyAccessesInaccessibleMemory implemented in terms of MemoryEffects? Can you keep all of this in terms of MemoryEffects?

Copy link
Contributor Author

@CarolineConcatto CarolineConcatto Nov 26, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hello @arsenm,
Not sure I understand the question.
onlyAccessesInaccessibleMemory is checking the memory effects of the call. It calls onlyAccessesInaccessibleMem, which checks whether the memory effects only touch inaccessible memory:
return getWithoutLoc(Location::InaccessibleMem).doesNotAccessMemory();
Do you mean I should look at a Data?
But AFAIU that is what this does indirectly. It checks whether any read and/or write is done in the inaccessible memory locations.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I mean stop using this wrapper, and directly query the MemoryEffects

Copy link
Contributor Author

@CarolineConcatto CarolineConcatto Nov 26, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am not sure why querying the memory effects of both calls directly inside the instructionClobbersQuery function is better than hiding that logic inside the function hasInaccessibleMemoryClobber.
Do you mind explaining what is the reason?
I could check the memory effects in instructionClobbersQuery, I would just want to understand why you are asking for that?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @arsenm ,
I did remove the function hasInaccessibleMemoryClobber.
Does this resolve this comment?

return !(ME1 & ME2 & MemoryEffects::writeOnly()).onlyReadsMemory();
return true;
}

template <typename AliasAnalysisType>
static bool
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
Expand Down Expand Up @@ -311,6 +322,9 @@ instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
}

if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
if (auto *CU = dyn_cast_or_null<CallBase>(DefInst))
if (!hasInaccessibleMemoryClobber(CB, CU))
return false;
ModRefInfo I = AA.getModRefInfo(DefInst, CB);
return isModOrRefSet(I);
}
Expand Down
90 changes: 45 additions & 45 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.wqm.demote.ll
Original file line number Diff line number Diff line change
Expand Up @@ -885,7 +885,7 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
; SI-NEXT: s_mov_b64 s[0:1], exec
; SI-NEXT: s_wqm_b64 exec, exec
; SI-NEXT: v_cvt_i32_f32_e32 v0, v0
; SI-NEXT: s_mov_b32 s4, 0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[2:3], vcc
; SI-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
Expand All @@ -894,24 +894,24 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
; SI-NEXT: s_cbranch_scc0 .LBB7_9
; SI-NEXT: ; %bb.2: ; %.demote0
; SI-NEXT: s_wqm_b64 s[6:7], s[0:1]
; SI-NEXT: s_and_b64 exec, exec, s[6:7]
; SI-NEXT: s_wqm_b64 s[4:5], s[0:1]
; SI-NEXT: s_and_b64 exec, exec, s[4:5]
; SI-NEXT: .LBB7_3: ; %.continue0.preheader
; SI-NEXT: s_or_b64 exec, exec, s[2:3]
; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
; SI-NEXT: s_mov_b64 s[2:3], 0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: s_branch .LBB7_5
; SI-NEXT: .LBB7_4: ; %.continue1
; SI-NEXT: ; in Loop: Header=BB7_5 Depth=1
; SI-NEXT: s_or_b64 exec, exec, s[4:5]
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: v_add_u32_e32 v0, vcc, 1, v0
; SI-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
; SI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; SI-NEXT: s_andn2_b64 exec, exec, s[2:3]
; SI-NEXT: s_cbranch_execz .LBB7_8
; SI-NEXT: .LBB7_5: ; %.continue0
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v2, v0, 0, s[4:5]
; SI-NEXT: v_mov_b32_e32 v3, v2
; SI-NEXT: s_nop 1
Expand All @@ -920,19 +920,19 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
; SI-NEXT: v_subrev_f32_dpp v2, v2, v3 quad_perm:[0,0,0,0] row_mask:0xf bank_mask:0xf bound_ctrl:1
; SI-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $exec
; SI-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
; SI-NEXT: s_and_b64 s[4:5], s[0:1], vcc
; SI-NEXT: s_xor_b64 s[4:5], s[4:5], -1
; SI-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; SI-NEXT: s_xor_b64 s[4:5], exec, s[6:7]
; SI-NEXT: s_and_b64 s[6:7], s[4:5], vcc
; SI-NEXT: s_xor_b64 s[6:7], s[6:7], -1
; SI-NEXT: s_and_saveexec_b64 s[8:9], s[6:7]
; SI-NEXT: s_xor_b64 s[6:7], exec, s[8:9]
; SI-NEXT: s_cbranch_execz .LBB7_4
; SI-NEXT: ; %bb.6: ; %.demote1
; SI-NEXT: ; in Loop: Header=BB7_5 Depth=1
; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
; SI-NEXT: s_cbranch_scc0 .LBB7_9
; SI-NEXT: ; %bb.7: ; %.demote1
; SI-NEXT: ; in Loop: Header=BB7_5 Depth=1
; SI-NEXT: s_wqm_b64 s[6:7], s[0:1]
; SI-NEXT: s_and_b64 exec, exec, s[6:7]
; SI-NEXT: s_wqm_b64 s[8:9], s[0:1]
; SI-NEXT: s_and_b64 exec, exec, s[8:9]
; SI-NEXT: s_branch .LBB7_4
; SI-NEXT: .LBB7_8: ; %.return
; SI-NEXT: s_or_b64 exec, exec, s[2:3]
Expand All @@ -951,7 +951,7 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
; GFX9-NEXT: s_mov_b64 s[0:1], exec
; GFX9-NEXT: s_wqm_b64 exec, exec
; GFX9-NEXT: v_cvt_i32_f32_e32 v0, v0
; GFX9-NEXT: s_mov_b32 s4, 0
; GFX9-NEXT: s_mov_b32 s6, 0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
Expand All @@ -960,24 +960,24 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
; GFX9-NEXT: s_cbranch_scc0 .LBB7_9
; GFX9-NEXT: ; %bb.2: ; %.demote0
; GFX9-NEXT: s_wqm_b64 s[6:7], s[0:1]
; GFX9-NEXT: s_and_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_wqm_b64 s[4:5], s[0:1]
; GFX9-NEXT: s_and_b64 exec, exec, s[4:5]
; GFX9-NEXT: .LBB7_3: ; %.continue0.preheader
; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: s_branch .LBB7_5
; GFX9-NEXT: .LBB7_4: ; %.continue1
; GFX9-NEXT: ; in Loop: Header=BB7_5 Depth=1
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX9-NEXT: v_add_u32_e32 v0, 1, v0
; GFX9-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execz .LBB7_8
; GFX9-NEXT: .LBB7_5: ; %.continue0
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v2, v0, 0, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v3, v2
; GFX9-NEXT: s_nop 1
Expand All @@ -986,19 +986,19 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
; GFX9-NEXT: v_subrev_f32_dpp v2, v2, v3 quad_perm:[0,0,0,0] row_mask:0xf bank_mask:0xf bound_ctrl:1
; GFX9-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $exec
; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
; GFX9-NEXT: s_and_b64 s[4:5], s[0:1], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], s[4:5], -1
; GFX9-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[6:7]
; GFX9-NEXT: s_and_b64 s[6:7], s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], -1
; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[6:7]
; GFX9-NEXT: s_xor_b64 s[6:7], exec, s[8:9]
; GFX9-NEXT: s_cbranch_execz .LBB7_4
; GFX9-NEXT: ; %bb.6: ; %.demote1
; GFX9-NEXT: ; in Loop: Header=BB7_5 Depth=1
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
; GFX9-NEXT: s_cbranch_scc0 .LBB7_9
; GFX9-NEXT: ; %bb.7: ; %.demote1
; GFX9-NEXT: ; in Loop: Header=BB7_5 Depth=1
; GFX9-NEXT: s_wqm_b64 s[6:7], s[0:1]
; GFX9-NEXT: s_and_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_wqm_b64 s[8:9], s[0:1]
; GFX9-NEXT: s_and_b64 exec, exec, s[8:9]
; GFX9-NEXT: s_branch .LBB7_4
; GFX9-NEXT: .LBB7_8: ; %.return
; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
Expand Down Expand Up @@ -1031,37 +1031,37 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
; GFX10-32-NEXT: .LBB7_3: ; %.continue0.preheader
; GFX10-32-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX10-32-NEXT: v_mov_b32_e32 v0, s1
; GFX10-32-NEXT: s_mov_b32 s2, s0
; GFX10-32-NEXT: s_branch .LBB7_5
; GFX10-32-NEXT: .LBB7_4: ; %.continue1
; GFX10-32-NEXT: ; in Loop: Header=BB7_5 Depth=1
; GFX10-32-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX10-32-NEXT: s_or_b32 exec_lo, exec_lo, s3
; GFX10-32-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX10-32-NEXT: v_cmp_ge_i32_e32 vcc_lo, v0, v1
; GFX10-32-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX10-32-NEXT: s_andn2_b32 exec_lo, exec_lo, s1
; GFX10-32-NEXT: s_cbranch_execz .LBB7_8
; GFX10-32-NEXT: .LBB7_5: ; %.continue0
; GFX10-32-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-32-NEXT: s_mov_b32 s2, s0
; GFX10-32-NEXT: v_cndmask_b32_e64 v2, v0, 0, s2
; GFX10-32-NEXT: v_mov_b32_e32 v3, v2
; GFX10-32-NEXT: v_mov_b32_dpp v3, v3 quad_perm:[1,1,1,1] row_mask:0xf bank_mask:0xf bound_ctrl:1
; GFX10-32-NEXT: v_subrev_f32_dpp v2, v2, v3 quad_perm:[0,0,0,0] row_mask:0xf bank_mask:0xf bound_ctrl:1
; GFX10-32-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $exec
; GFX10-32-NEXT: v_cmp_eq_f32_e32 vcc_lo, 0, v2
; GFX10-32-NEXT: s_and_b32 s2, s0, vcc_lo
; GFX10-32-NEXT: s_xor_b32 s2, s2, -1
; GFX10-32-NEXT: s_and_saveexec_b32 s3, s2
; GFX10-32-NEXT: s_xor_b32 s2, exec_lo, s3
; GFX10-32-NEXT: s_and_b32 s3, s2, vcc_lo
; GFX10-32-NEXT: s_xor_b32 s3, s3, -1
; GFX10-32-NEXT: s_and_saveexec_b32 s4, s3
; GFX10-32-NEXT: s_xor_b32 s3, exec_lo, s4
; GFX10-32-NEXT: s_cbranch_execz .LBB7_4
; GFX10-32-NEXT: ; %bb.6: ; %.demote1
; GFX10-32-NEXT: ; in Loop: Header=BB7_5 Depth=1
; GFX10-32-NEXT: s_andn2_b32 s0, s0, exec_lo
; GFX10-32-NEXT: s_cbranch_scc0 .LBB7_9
; GFX10-32-NEXT: ; %bb.7: ; %.demote1
; GFX10-32-NEXT: ; in Loop: Header=BB7_5 Depth=1
; GFX10-32-NEXT: s_wqm_b32 s3, s0
; GFX10-32-NEXT: s_and_b32 exec_lo, exec_lo, s3
; GFX10-32-NEXT: s_wqm_b32 s4, s0
; GFX10-32-NEXT: s_and_b32 exec_lo, exec_lo, s4
; GFX10-32-NEXT: s_branch .LBB7_4
; GFX10-32-NEXT: .LBB7_8: ; %.return
; GFX10-32-NEXT: s_or_b32 exec_lo, exec_lo, s1
Expand Down Expand Up @@ -1094,41 +1094,41 @@ define amdgpu_ps void @wqm_deriv_loop(<2 x float> %input, float %arg, i32 %index
; GFX10-64-NEXT: .LBB7_3: ; %.continue0.preheader
; GFX10-64-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX10-64-NEXT: v_mov_b32_e32 v0, s4
; GFX10-64-NEXT: s_mov_b64 s[2:3], 0
; GFX10-64-NEXT: s_mov_b64 s[2:3], s[0:1]
; GFX10-64-NEXT: s_mov_b64 s[4:5], 0
; GFX10-64-NEXT: s_branch .LBB7_5
; GFX10-64-NEXT: .LBB7_4: ; %.continue1
; GFX10-64-NEXT: ; in Loop: Header=BB7_5 Depth=1
; GFX10-64-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX10-64-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX10-64-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX10-64-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
; GFX10-64-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX10-64-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX10-64-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX10-64-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX10-64-NEXT: s_cbranch_execz .LBB7_8
; GFX10-64-NEXT: .LBB7_5: ; %.continue0
; GFX10-64-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-64-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX10-64-NEXT: v_cndmask_b32_e64 v2, v0, 0, s[4:5]
; GFX10-64-NEXT: v_cndmask_b32_e64 v2, v0, 0, s[2:3]
; GFX10-64-NEXT: v_mov_b32_e32 v3, v2
; GFX10-64-NEXT: v_mov_b32_dpp v3, v3 quad_perm:[1,1,1,1] row_mask:0xf bank_mask:0xf bound_ctrl:1
; GFX10-64-NEXT: v_subrev_f32_dpp v2, v2, v3 quad_perm:[0,0,0,0] row_mask:0xf bank_mask:0xf bound_ctrl:1
; GFX10-64-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $exec
; GFX10-64-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
; GFX10-64-NEXT: s_and_b64 s[4:5], s[0:1], vcc
; GFX10-64-NEXT: s_xor_b64 s[4:5], s[4:5], -1
; GFX10-64-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GFX10-64-NEXT: s_xor_b64 s[4:5], exec, s[6:7]
; GFX10-64-NEXT: s_and_b64 s[6:7], s[2:3], vcc
; GFX10-64-NEXT: s_xor_b64 s[6:7], s[6:7], -1
; GFX10-64-NEXT: s_and_saveexec_b64 s[8:9], s[6:7]
; GFX10-64-NEXT: s_xor_b64 s[6:7], exec, s[8:9]
; GFX10-64-NEXT: s_cbranch_execz .LBB7_4
; GFX10-64-NEXT: ; %bb.6: ; %.demote1
; GFX10-64-NEXT: ; in Loop: Header=BB7_5 Depth=1
; GFX10-64-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
; GFX10-64-NEXT: s_cbranch_scc0 .LBB7_9
; GFX10-64-NEXT: ; %bb.7: ; %.demote1
; GFX10-64-NEXT: ; in Loop: Header=BB7_5 Depth=1
; GFX10-64-NEXT: s_wqm_b64 s[6:7], s[0:1]
; GFX10-64-NEXT: s_and_b64 exec, exec, s[6:7]
; GFX10-64-NEXT: s_wqm_b64 s[8:9], s[0:1]
; GFX10-64-NEXT: s_and_b64 exec, exec, s[8:9]
; GFX10-64-NEXT: s_branch .LBB7_4
; GFX10-64-NEXT: .LBB7_8: ; %.return
; GFX10-64-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX10-64-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX10-64-NEXT: s_and_b64 exec, exec, s[0:1]
; GFX10-64-NEXT: v_mov_b32_e32 v0, 0x3c00
; GFX10-64-NEXT: v_bfrev_b32_e32 v1, 60
Expand Down
Loading