diff --git a/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll b/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll index e74fd21365c9d..90ef9a7a45863 100644 --- a/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll +++ b/llvm/test/CodeGen/AMDGPU/atomics_cond_sub.ll @@ -25,7 +25,7 @@ define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32(ptr %addr, i32 %in) { ; GFX12-GISEL-NEXT: flat_atomic_cond_sub_u32 v0, v[0:1], v2 offset:-16 th:TH_ATOMIC_RETURN ; GFX12-GISEL-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %addr, i32 -4 + %gep = getelementptr inbounds i32, ptr %addr, i32 -4 %unused = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr %gep, i32 %in) ret void } @@ -49,7 +49,7 @@ define amdgpu_kernel void @flat_atomic_cond_sub_no_rtn_u32_forced(ptr %addr, i32 ; GFX12-GISEL-NEXT: flat_atomic_cond_sub_u32 v[0:1], v2 offset:-16 ; GFX12-GISEL-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %addr, i32 -4 + %gep = getelementptr inbounds i32, ptr %addr, i32 -4 %unused = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr %gep, i32 %in) ret void } @@ -83,7 +83,7 @@ define amdgpu_kernel void @flat_atomic_cond_sub_rtn_u32(ptr %addr, i32 %in, ptr ; GFX12-GISEL-NEXT: flat_store_b32 v[0:1], v2 ; GFX12-GISEL-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %addr, i32 4 + %gep = getelementptr inbounds i32, ptr %addr, i32 4 %val = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr %gep, i32 %in) store i32 %val, ptr %use ret void diff --git a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll index 890f4f77ed107..e509d7b2b9b1b 100644 --- a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll +++ b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll @@ -12,8 +12,8 @@ define void @test_sinkable_flat_small_offset_i32(ptr %out, ptr %in, i32 %cond) { ; OPT-GFX7-LABEL: @test_sinkable_flat_small_offset_i32( ; OPT-GFX7-NEXT: entry: -; OPT-GFX7-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i64 999999 -; OPT-GFX7-NEXT: [[IN_GEP:%.*]] = getelementptr i32, ptr [[IN:%.*]], i64 7 +; OPT-GFX7-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 999999 +; OPT-GFX7-NEXT: [[IN_GEP:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 7 ; OPT-GFX7-NEXT: [[CMP0:%.*]] = icmp eq i32 [[COND:%.*]], 0 ; OPT-GFX7-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] ; OPT-GFX7: if: @@ -28,8 +28,8 @@ define void @test_sinkable_flat_small_offset_i32(ptr %out, ptr %in, i32 %cond) { ; ; OPT-GFX8-LABEL: @test_sinkable_flat_small_offset_i32( ; OPT-GFX8-NEXT: entry: -; OPT-GFX8-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i64 999999 -; OPT-GFX8-NEXT: [[IN_GEP:%.*]] = getelementptr i32, ptr [[IN:%.*]], i64 7 +; OPT-GFX8-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 999999 +; OPT-GFX8-NEXT: [[IN_GEP:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 7 ; OPT-GFX8-NEXT: [[CMP0:%.*]] = icmp eq i32 [[COND:%.*]], 0 ; OPT-GFX8-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] ; OPT-GFX8: if: @@ -44,11 +44,11 @@ define void @test_sinkable_flat_small_offset_i32(ptr %out, ptr %in, i32 %cond) { ; ; OPT-GFX9-LABEL: @test_sinkable_flat_small_offset_i32( ; OPT-GFX9-NEXT: entry: -; OPT-GFX9-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i64 999999 +; OPT-GFX9-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 999999 ; OPT-GFX9-NEXT: [[CMP0:%.*]] = icmp eq i32 [[COND:%.*]], 0 ; OPT-GFX9-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] ; OPT-GFX9: if: -; 
OPT-GFX9-NEXT: [[SUNKADDR:%.*]] = getelementptr i8, ptr [[IN:%.*]], i64 28 +; OPT-GFX9-NEXT: [[SUNKADDR:%.*]] = getelementptr inbounds i8, ptr [[IN:%.*]], i64 28 ; OPT-GFX9-NEXT: [[LOAD:%.*]] = load i32, ptr [[SUNKADDR]], align 4 ; OPT-GFX9-NEXT: br label [[ENDIF]] ; OPT-GFX9: endif: @@ -58,11 +58,11 @@ define void @test_sinkable_flat_small_offset_i32(ptr %out, ptr %in, i32 %cond) { ; ; OPT-GFX10-LABEL: @test_sinkable_flat_small_offset_i32( ; OPT-GFX10-NEXT: entry: -; OPT-GFX10-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i64 999999 +; OPT-GFX10-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 999999 ; OPT-GFX10-NEXT: [[CMP0:%.*]] = icmp eq i32 [[COND:%.*]], 0 ; OPT-GFX10-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] ; OPT-GFX10: if: -; OPT-GFX10-NEXT: [[SUNKADDR:%.*]] = getelementptr i8, ptr [[IN:%.*]], i64 28 +; OPT-GFX10-NEXT: [[SUNKADDR:%.*]] = getelementptr inbounds i8, ptr [[IN:%.*]], i64 28 ; OPT-GFX10-NEXT: [[LOAD:%.*]] = load i32, ptr [[SUNKADDR]], align 4 ; OPT-GFX10-NEXT: br label [[ENDIF]] ; OPT-GFX10: endif: @@ -146,8 +146,8 @@ define void @test_sinkable_flat_small_offset_i32(ptr %out, ptr %in, i32 %cond) { ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] entry: - %out.gep = getelementptr i32, ptr %out, i64 999999 - %in.gep = getelementptr i32, ptr %in, i64 7 + %out.gep = getelementptr inbounds i32, ptr %out, i64 999999 + %in.gep = getelementptr inbounds i32, ptr %in, i64 7 %cmp0 = icmp eq i32 %cond, 0 br i1 %cmp0, label %endif, label %if @@ -167,12 +167,12 @@ done: define void @test_sink_noop_addrspacecast_flat_to_global_i32(ptr %out, ptr %in, i32 %cond) { ; OPT-GFX7-LABEL: @test_sink_noop_addrspacecast_flat_to_global_i32( ; OPT-GFX7-NEXT: entry: -; OPT-GFX7-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i64 999999 +; OPT-GFX7-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 999999 ; OPT-GFX7-NEXT: [[CMP0:%.*]] = icmp eq i32 [[COND:%.*]], 0 ; OPT-GFX7-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] ; OPT-GFX7: if: ; OPT-GFX7-NEXT: [[TMP0:%.*]] = addrspacecast ptr [[IN:%.*]] to ptr addrspace(1) -; OPT-GFX7-NEXT: [[SUNKADDR:%.*]] = getelementptr i8, ptr addrspace(1) [[TMP0]], i64 28 +; OPT-GFX7-NEXT: [[SUNKADDR:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP0]], i64 28 ; OPT-GFX7-NEXT: [[LOAD:%.*]] = load i32, ptr addrspace(1) [[SUNKADDR]], align 4 ; OPT-GFX7-NEXT: br label [[ENDIF]] ; OPT-GFX7: endif: @@ -182,8 +182,8 @@ define void @test_sink_noop_addrspacecast_flat_to_global_i32(ptr %out, ptr %in, ; ; OPT-GFX8-LABEL: @test_sink_noop_addrspacecast_flat_to_global_i32( ; OPT-GFX8-NEXT: entry: -; OPT-GFX8-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i64 999999 -; OPT-GFX8-NEXT: [[IN_GEP:%.*]] = getelementptr i32, ptr [[IN:%.*]], i64 7 +; OPT-GFX8-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 999999 +; OPT-GFX8-NEXT: [[IN_GEP:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 7 ; OPT-GFX8-NEXT: [[CMP0:%.*]] = icmp eq i32 [[COND:%.*]], 0 ; OPT-GFX8-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] ; OPT-GFX8: if: @@ -197,12 +197,12 @@ define void @test_sink_noop_addrspacecast_flat_to_global_i32(ptr %out, ptr %in, ; ; OPT-GFX9-LABEL: @test_sink_noop_addrspacecast_flat_to_global_i32( ; OPT-GFX9-NEXT: entry: -; OPT-GFX9-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i64 999999 +; OPT-GFX9-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 999999 ; OPT-GFX9-NEXT: 
[[CMP0:%.*]] = icmp eq i32 [[COND:%.*]], 0 ; OPT-GFX9-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] ; OPT-GFX9: if: ; OPT-GFX9-NEXT: [[TMP0:%.*]] = addrspacecast ptr [[IN:%.*]] to ptr addrspace(1) -; OPT-GFX9-NEXT: [[SUNKADDR:%.*]] = getelementptr i8, ptr addrspace(1) [[TMP0]], i64 28 +; OPT-GFX9-NEXT: [[SUNKADDR:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP0]], i64 28 ; OPT-GFX9-NEXT: [[LOAD:%.*]] = load i32, ptr addrspace(1) [[SUNKADDR]], align 4 ; OPT-GFX9-NEXT: br label [[ENDIF]] ; OPT-GFX9: endif: @@ -212,12 +212,12 @@ define void @test_sink_noop_addrspacecast_flat_to_global_i32(ptr %out, ptr %in, ; ; OPT-GFX10-LABEL: @test_sink_noop_addrspacecast_flat_to_global_i32( ; OPT-GFX10-NEXT: entry: -; OPT-GFX10-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i64 999999 +; OPT-GFX10-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 999999 ; OPT-GFX10-NEXT: [[CMP0:%.*]] = icmp eq i32 [[COND:%.*]], 0 ; OPT-GFX10-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] ; OPT-GFX10: if: ; OPT-GFX10-NEXT: [[TMP0:%.*]] = addrspacecast ptr [[IN:%.*]] to ptr addrspace(1) -; OPT-GFX10-NEXT: [[SUNKADDR:%.*]] = getelementptr i8, ptr addrspace(1) [[TMP0]], i64 28 +; OPT-GFX10-NEXT: [[SUNKADDR:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP0]], i64 28 ; OPT-GFX10-NEXT: [[LOAD:%.*]] = load i32, ptr addrspace(1) [[SUNKADDR]], align 4 ; OPT-GFX10-NEXT: br label [[ENDIF]] ; OPT-GFX10: endif: @@ -303,8 +303,8 @@ define void @test_sink_noop_addrspacecast_flat_to_global_i32(ptr %out, ptr %in, ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] entry: - %out.gep = getelementptr i32, ptr %out, i64 999999 - %in.gep = getelementptr i32, ptr %in, i64 7 + %out.gep = getelementptr inbounds i32, ptr %out, i64 999999 + %in.gep = getelementptr inbounds i32, ptr %in, i64 7 %cast = addrspacecast ptr %in.gep to ptr addrspace(1) %cmp0 = icmp eq i32 %cond, 0 br i1 %cmp0, label %endif, label %if @@ -325,12 +325,12 @@ done: define void @test_sink_noop_addrspacecast_flat_to_constant_i32(ptr %out, ptr %in, i32 %cond) { ; OPT-LABEL: @test_sink_noop_addrspacecast_flat_to_constant_i32( ; OPT-NEXT: entry: -; OPT-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i64 999999 +; OPT-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 999999 ; OPT-NEXT: [[CMP0:%.*]] = icmp eq i32 [[COND:%.*]], 0 ; OPT-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] ; OPT: if: ; OPT-NEXT: [[TMP0:%.*]] = addrspacecast ptr [[IN:%.*]] to ptr addrspace(4) -; OPT-NEXT: [[SUNKADDR:%.*]] = getelementptr i8, ptr addrspace(4) [[TMP0]], i64 28 +; OPT-NEXT: [[SUNKADDR:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[TMP0]], i64 28 ; OPT-NEXT: [[LOAD:%.*]] = load i32, ptr addrspace(4) [[SUNKADDR]], align 4 ; OPT-NEXT: br label [[ENDIF]] ; OPT: endif: @@ -416,8 +416,8 @@ define void @test_sink_noop_addrspacecast_flat_to_constant_i32(ptr %out, ptr %in ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] entry: - %out.gep = getelementptr i32, ptr %out, i64 999999 - %in.gep = getelementptr i32, ptr %in, i64 7 + %out.gep = getelementptr inbounds i32, ptr %out, i64 999999 + %in.gep = getelementptr inbounds i32, ptr %in, i64 7 %cast = addrspacecast ptr %in.gep to ptr addrspace(4) %cmp0 = icmp eq i32 %cond, 0 br i1 %cmp0, label %endif, label %if @@ -438,8 +438,8 @@ done: define void @test_sink_flat_small_max_flat_offset(ptr %out, ptr %in) #1 { ; OPT-GFX7-LABEL: @test_sink_flat_small_max_flat_offset( ; OPT-GFX7-NEXT: entry: 
-; OPT-GFX7-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i32 1024 -; OPT-GFX7-NEXT: [[IN_GEP:%.*]] = getelementptr i8, ptr [[IN:%.*]], i64 4095 +; OPT-GFX7-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i32 1024 +; OPT-GFX7-NEXT: [[IN_GEP:%.*]] = getelementptr inbounds i8, ptr [[IN:%.*]], i64 4095 ; OPT-GFX7-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #[[ATTR3:[0-9]+]] ; OPT-GFX7-NEXT: [[CMP0:%.*]] = icmp eq i32 [[TID]], 0 ; OPT-GFX7-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] @@ -456,8 +456,8 @@ define void @test_sink_flat_small_max_flat_offset(ptr %out, ptr %in) #1 { ; ; OPT-GFX8-LABEL: @test_sink_flat_small_max_flat_offset( ; OPT-GFX8-NEXT: entry: -; OPT-GFX8-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i32 1024 -; OPT-GFX8-NEXT: [[IN_GEP:%.*]] = getelementptr i8, ptr [[IN:%.*]], i64 4095 +; OPT-GFX8-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i32 1024 +; OPT-GFX8-NEXT: [[IN_GEP:%.*]] = getelementptr inbounds i8, ptr [[IN:%.*]], i64 4095 ; OPT-GFX8-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #[[ATTR3:[0-9]+]] ; OPT-GFX8-NEXT: [[CMP0:%.*]] = icmp eq i32 [[TID]], 0 ; OPT-GFX8-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] @@ -474,12 +474,12 @@ define void @test_sink_flat_small_max_flat_offset(ptr %out, ptr %in) #1 { ; ; OPT-GFX9-LABEL: @test_sink_flat_small_max_flat_offset( ; OPT-GFX9-NEXT: entry: -; OPT-GFX9-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i32 1024 +; OPT-GFX9-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i32 1024 ; OPT-GFX9-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #[[ATTR3:[0-9]+]] ; OPT-GFX9-NEXT: [[CMP0:%.*]] = icmp eq i32 [[TID]], 0 ; OPT-GFX9-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] ; OPT-GFX9: if: -; OPT-GFX9-NEXT: [[SUNKADDR:%.*]] = getelementptr i8, ptr [[IN:%.*]], i64 4095 +; OPT-GFX9-NEXT: [[SUNKADDR:%.*]] = getelementptr inbounds i8, ptr [[IN:%.*]], i64 4095 ; OPT-GFX9-NEXT: [[LOAD:%.*]] = load i8, ptr [[SUNKADDR]], align 1 ; OPT-GFX9-NEXT: [[CAST:%.*]] = sext i8 [[LOAD]] to i32 ; OPT-GFX9-NEXT: br label [[ENDIF]] @@ -490,8 +490,8 @@ define void @test_sink_flat_small_max_flat_offset(ptr %out, ptr %in) #1 { ; ; OPT-GFX10-LABEL: @test_sink_flat_small_max_flat_offset( ; OPT-GFX10-NEXT: entry: -; OPT-GFX10-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i32 1024 -; OPT-GFX10-NEXT: [[IN_GEP:%.*]] = getelementptr i8, ptr [[IN:%.*]], i64 4095 +; OPT-GFX10-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i32 1024 +; OPT-GFX10-NEXT: [[IN_GEP:%.*]] = getelementptr inbounds i8, ptr [[IN:%.*]], i64 4095 ; OPT-GFX10-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #[[ATTR3:[0-9]+]] ; OPT-GFX10-NEXT: [[CMP0:%.*]] = icmp eq i32 [[TID]], 0 ; OPT-GFX10-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] @@ -588,8 +588,8 @@ define void @test_sink_flat_small_max_flat_offset(ptr %out, ptr %in) #1 { ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] entry: - %out.gep = getelementptr i32, ptr %out, i32 1024 - %in.gep = getelementptr i8, ptr %in, i64 4095 + %out.gep = getelementptr inbounds i32, ptr %out, i32 1024 + %in.gep = getelementptr inbounds i8, ptr %in, i64 4095 %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0 %cmp0 = icmp eq i32 %tid, 0 br i1 %cmp0, label %endif, label %if @@ -611,8 +611,8 @@ done: define void @test_sink_flat_small_max_plus_1_flat_offset(ptr %out, ptr 
%in) #1 { ; OPT-LABEL: @test_sink_flat_small_max_plus_1_flat_offset( ; OPT-NEXT: entry: -; OPT-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i64 99999 -; OPT-NEXT: [[IN_GEP:%.*]] = getelementptr i8, ptr [[IN:%.*]], i64 4096 +; OPT-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 99999 +; OPT-NEXT: [[IN_GEP:%.*]] = getelementptr inbounds i8, ptr [[IN:%.*]], i64 4096 ; OPT-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #[[ATTR3:[0-9]+]] ; OPT-NEXT: [[CMP0:%.*]] = icmp eq i32 [[TID]], 0 ; OPT-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] @@ -711,8 +711,8 @@ define void @test_sink_flat_small_max_plus_1_flat_offset(ptr %out, ptr %in) #1 { ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] entry: - %out.gep = getelementptr i32, ptr %out, i64 99999 - %in.gep = getelementptr i8, ptr %in, i64 4096 + %out.gep = getelementptr inbounds i32, ptr %out, i64 99999 + %in.gep = getelementptr inbounds i8, ptr %in, i64 4096 %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0 %cmp0 = icmp eq i32 %tid, 0 br i1 %cmp0, label %endif, label %if @@ -734,8 +734,8 @@ done: define void @test_sinkable_flat_reg_offset(ptr %out, ptr %in, i64 %reg) #1 { ; OPT-LABEL: @test_sinkable_flat_reg_offset( ; OPT-NEXT: entry: -; OPT-NEXT: [[OUT_GEP:%.*]] = getelementptr i32, ptr [[OUT:%.*]], i32 1024 -; OPT-NEXT: [[IN_GEP:%.*]] = getelementptr i8, ptr [[IN:%.*]], i64 [[REG:%.*]] +; OPT-NEXT: [[OUT_GEP:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i32 1024 +; OPT-NEXT: [[IN_GEP:%.*]] = getelementptr inbounds i8, ptr [[IN:%.*]], i64 [[REG:%.*]] ; OPT-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #[[ATTR3]] ; OPT-NEXT: [[CMP0:%.*]] = icmp eq i32 [[TID]], 0 ; OPT-NEXT: br i1 [[CMP0]], label [[ENDIF:%.*]], label [[IF:%.*]] @@ -834,8 +834,8 @@ define void @test_sinkable_flat_reg_offset(ptr %out, ptr %in, i64 %reg) #1 { ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] entry: - %out.gep = getelementptr i32, ptr %out, i32 1024 - %in.gep = getelementptr i8, ptr %in, i64 %reg + %out.gep = getelementptr inbounds i32, ptr %out, i32 1024 + %in.gep = getelementptr inbounds i8, ptr %in, i64 %reg %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0 %cmp0 = icmp eq i32 %tid, 0 br i1 %cmp0, label %endif, label %if diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll index fdc15a301164a..77dd169c69e07 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll @@ -371,7 +371,7 @@ define float @flat_agent_atomic_fadd_ret_f32__offset12b_pos__amdgpu_no_fine_grai ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 ret float %result } @@ -567,7 +567,7 @@ define float @flat_agent_atomic_fadd_ret_f32__offset12b_neg__amdgpu_no_fine_grai ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %result = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 ret float 
%result } @@ -992,7 +992,7 @@ define void @flat_agent_atomic_fadd_noret_f32__offset12b_pos__amdgpu_no_fine_gra ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 ret void } @@ -1216,7 +1216,7 @@ define void @flat_agent_atomic_fadd_noret_f32__offset12b_neg__amdgpu_no_fine_gra ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %unused = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 ret void } @@ -1406,7 +1406,7 @@ define float @flat_system_atomic_fadd_ret_f32__offset12b_pos__amdgpu_no_fine_gra ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fadd ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 ret float %result } @@ -1627,7 +1627,7 @@ define void @flat_system_atomic_fadd_noret_f32__offset12b_pos__amdgpu_no_fine_gr ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 ret void } @@ -1792,7 +1792,7 @@ define void @flat_agent_atomic_fadd_noret_f32_maybe_remote(ptr %ptr, float %val) ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst ret void } @@ -1944,7 +1944,7 @@ define void @flat_agent_atomic_fadd_noret_f32___amdgpu_no_fine_grained_memory(pt ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -2162,7 +2162,7 @@ define void @flat_agent_atomic_fadd_noret_f32___amdgpu_no_fine_grained_memory__a ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 ret void } @@ -2327,7 +2327,7 @@ define void @flat_agent_atomic_fadd_noret_f32_amdgpu_ignore_denormal_mode(ptr %p ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, 
float %val syncscope("agent") seq_cst, !amdgpu.ignore.denormal.mode !0 ret void } @@ -2695,7 +2695,7 @@ define float @flat_agent_atomic_fadd_ret_f32__offset12b_pos__ftz__amdgpu_no_fine ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -2891,7 +2891,7 @@ define float @flat_agent_atomic_fadd_ret_f32__offset12b_neg__ftz__amdgpu_no_fine ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %result = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -3316,7 +3316,7 @@ define void @flat_agent_atomic_fadd_noret_f32__offset12b_pos__ftz__amdgpu_no_fin ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -3540,7 +3540,7 @@ define void @flat_agent_atomic_fadd_noret_f32__offset12b_neg__ftz__amdgpu_no_fin ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %unused = atomicrmw fadd ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -3730,7 +3730,7 @@ define float @flat_system_atomic_fadd_ret_f32__offset12b_pos__ftz__amdgpu_no_fin ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fadd ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -3951,7 +3951,7 @@ define void @flat_system_atomic_fadd_noret_f32__offset12b_pos__ftz__amdgpu_no_fi ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -4141,7 +4141,7 @@ define float @flat_agent_atomic_fadd_ret_f32__ieee__amdgpu_no_fine_grained_memor ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fadd ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 ret float %result } @@ -4362,7 +4362,7 @@ define void @flat_agent_atomic_fadd_noret_f32__ieee__amdgpu_no_fine_grained_memo ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, float %val 
seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0 ret void } @@ -10099,7 +10099,7 @@ define void @flat_agent_atomic_fadd_noret_f16__offset12b__align4_pos__amdgpu_no_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr half, ptr %ptr, i64 1023 + %gep = getelementptr inbounds half, ptr %ptr, i64 1023 %unused = atomicrmw fadd ptr %gep, half %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret void } @@ -10322,7 +10322,7 @@ define half @flat_agent_atomic_fadd_ret_f16__offset12b_pos__align4__amdgpu_no_fi ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr half, ptr %ptr, i64 1023 + %gep = getelementptr inbounds half, ptr %ptr, i64 1023 %result = atomicrmw fadd ptr %gep, half %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret half %result } @@ -12948,7 +12948,7 @@ define bfloat @flat_agent_atomic_fadd_ret_bf16__offset12b_pos__align4__amdgpu_no ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v3 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr bfloat, ptr %ptr, i64 1023 + %gep = getelementptr inbounds bfloat, ptr %ptr, i64 1023 %result = atomicrmw fadd ptr %gep, bfloat %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret bfloat %result } @@ -13226,7 +13226,7 @@ define void @flat_agent_atomic_fadd_noret_bf16__offset12b__align4_pos__amdgpu_no ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr bfloat, ptr %ptr, i64 1023 + %gep = getelementptr inbounds bfloat, ptr %ptr, i64 1023 %unused = atomicrmw fadd ptr %gep, bfloat %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret void } @@ -14646,7 +14646,7 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__offset12b_pos__amdgpu_no_fi ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %result = atomicrmw fadd ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x half> %result } @@ -14851,7 +14851,7 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__offset12b_neg__amdgpu_no_fi ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 -512 %result = atomicrmw fadd ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x half> %result } @@ -15218,7 +15218,7 @@ define void @flat_agent_atomic_fadd_noret_v2f16__offset12b_pos__amdgpu_no_fine_g ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -15421,7 +15421,7 @@ define void @flat_agent_atomic_fadd_noret_v2f16__offset12b_neg__amdgpu_no_fine_g ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = 
getelementptr <2 x half>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 -512 %unused = atomicrmw fadd ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -15615,7 +15615,7 @@ define <2 x half> @flat_system_atomic_fadd_ret_v2f16__offset12b_pos__amdgpu_no_f ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %result = atomicrmw fadd ptr %gep, <2 x half> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x half> %result } @@ -15805,7 +15805,7 @@ define void @flat_system_atomic_fadd_noret_v2f16__offset12b_pos__amdgpu_no_fine_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, <2 x half> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -17100,7 +17100,7 @@ define <2 x bfloat> @flat_agent_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_no ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %result = atomicrmw fadd ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x bfloat> %result } @@ -17393,7 +17393,7 @@ define <2 x bfloat> @flat_agent_atomic_fadd_ret_v2bf16__offset12b_neg__amdgpu_no ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 -512 %result = atomicrmw fadd ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x bfloat> %result } @@ -17936,7 +17936,7 @@ define void @flat_agent_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -18227,7 +18227,7 @@ define void @flat_agent_atomic_fadd_noret_v2bf16__offset12b_neg__amdgpu_no_fine_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 -512 %unused = atomicrmw fadd ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -18509,7 +18509,7 @@ define <2 x bfloat> @flat_system_atomic_fadd_ret_v2bf16__offset12b_pos__amdgpu_n ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %result = atomicrmw fadd ptr %gep, <2 x bfloat> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x bfloat> %result } @@ -18787,7 +18787,7 @@ define void 
@flat_system_atomic_fadd_noret_v2bf16__offset12b_pos__amdgpu_no_fine ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %unused = atomicrmw fadd ptr %gep, <2 x bfloat> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmax.ll index b29a5d0920030..68ebcfb8af2f3 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmax.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmax.ll @@ -299,7 +299,7 @@ define float @flat_agent_atomic_fmax_ret_f32__offset12b_pos__amdgpu_no_fine_grai ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fmax ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -464,7 +464,7 @@ define float @flat_agent_atomic_fmax_ret_f32__offset12b_neg__amdgpu_no_fine_grai ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %result = atomicrmw fmax ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -753,7 +753,7 @@ define void @flat_agent_atomic_fmax_noret_f32__offset12b_pos__amdgpu_no_fine_gra ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fmax ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -918,7 +918,7 @@ define void @flat_agent_atomic_fmax_noret_f32__offset12b_neg__amdgpu_no_fine_gra ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %unused = atomicrmw fmax ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -1070,7 +1070,7 @@ define float @flat_system_atomic_fmax_ret_f32__offset12b_pos__amdgpu_no_fine_gra ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fmax ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -1221,7 +1221,7 @@ define void @flat_system_atomic_fmax_noret_f32__offset12b_pos__amdgpu_no_fine_gr ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fmax ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -1853,7 +1853,7 @@ define float @flat_agent_atomic_fmax_ret_f32__offset12b_pos__ftz__amdgpu_no_fine ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, 
i64 511 %result = atomicrmw fmax ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -2018,7 +2018,7 @@ define float @flat_agent_atomic_fmax_ret_f32__offset12b_neg__ftz__amdgpu_no_fine ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %result = atomicrmw fmax ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -2307,7 +2307,7 @@ define void @flat_agent_atomic_fmax_noret_f32__offset12b_pos__ftz__amdgpu_no_fin ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fmax ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -2472,7 +2472,7 @@ define void @flat_agent_atomic_fmax_noret_f32__offset12b_neg__ftz__amdgpu_no_fin ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %unused = atomicrmw fmax ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -2624,7 +2624,7 @@ define float @flat_system_atomic_fmax_ret_f32__offset12b_pos__ftz__amdgpu_no_fin ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fmax ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -2775,7 +2775,7 @@ define void @flat_system_atomic_fmax_noret_f32__offset12b_pos__ftz__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fmax ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -8083,7 +8083,7 @@ define half @flat_agent_atomic_fmax_ret_f16__offset12b_pos__align4__amdgpu_no_fi ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr half, ptr %ptr, i64 1023 + %gep = getelementptr inbounds half, ptr %ptr, i64 1023 %result = atomicrmw fmax ptr %gep, half %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret half %result } @@ -8314,7 +8314,7 @@ define void @flat_agent_atomic_fmax_noret_f16__offset12b__align4_pos__amdgpu_no_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr half, ptr %ptr, i64 1023 + %gep = getelementptr inbounds half, ptr %ptr, i64 1023 %unused = atomicrmw fmax ptr %gep, half %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret void } @@ -11309,7 +11309,7 @@ define bfloat @flat_agent_atomic_fmax_ret_bf16__offset12b_pos__align4__amdgpu_no ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v3 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr bfloat, ptr %ptr, i64 1023 + %gep = getelementptr inbounds bfloat, ptr %ptr, i64 1023 %result = atomicrmw fmax ptr %gep, 
bfloat %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret bfloat %result } @@ -11588,7 +11588,7 @@ define void @flat_agent_atomic_fmax_noret_bf16__offset12b__align4_pos__amdgpu_no ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr bfloat, ptr %ptr, i64 1023 + %gep = getelementptr inbounds bfloat, ptr %ptr, i64 1023 %unused = atomicrmw fmax ptr %gep, bfloat %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret void } @@ -12774,7 +12774,7 @@ define <2 x half> @flat_agent_atomic_fmax_ret_v2f16__offset12b_pos__amdgpu_no_fi ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %result = atomicrmw fmax ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x half> %result } @@ -13031,7 +13031,7 @@ define <2 x half> @flat_agent_atomic_fmax_ret_v2f16__offset12b_neg__amdgpu_no_fi ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 -512 %result = atomicrmw fmax ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x half> %result } @@ -13492,7 +13492,7 @@ define void @flat_agent_atomic_fmax_noret_v2f16__offset12b_pos__amdgpu_no_fine_g ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %unused = atomicrmw fmax ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -13745,7 +13745,7 @@ define void @flat_agent_atomic_fmax_noret_v2f16__offset12b_neg__amdgpu_no_fine_g ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 -512 %unused = atomicrmw fmax ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -13987,7 +13987,7 @@ define <2 x half> @flat_system_atomic_fmax_ret_v2f16__offset12b_pos__amdgpu_no_f ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %result = atomicrmw fmax ptr %gep, <2 x half> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x half> %result } @@ -14224,7 +14224,7 @@ define void @flat_system_atomic_fmax_noret_v2f16__offset12b_pos__amdgpu_no_fine_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %unused = atomicrmw fmax ptr %gep, <2 x half> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -14925,7 +14925,7 @@ define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_no ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = 
getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %result = atomicrmw fmax ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x bfloat> %result } @@ -15293,7 +15293,7 @@ define <2 x bfloat> @flat_agent_atomic_fmax_ret_v2bf16__offset12b_neg__amdgpu_no ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 -512 %result = atomicrmw fmax ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x bfloat> %result } @@ -15972,7 +15972,7 @@ define void @flat_agent_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %unused = atomicrmw fmax ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -16334,7 +16334,7 @@ define void @flat_agent_atomic_fmax_noret_v2bf16__offset12b_neg__amdgpu_no_fine_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 -512 %unused = atomicrmw fmax ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -16687,7 +16687,7 @@ define <2 x bfloat> @flat_system_atomic_fmax_ret_v2bf16__offset12b_pos__amdgpu_n ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %result = atomicrmw fmax ptr %gep, <2 x bfloat> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x bfloat> %result } @@ -17033,7 +17033,7 @@ define void @flat_system_atomic_fmax_noret_v2bf16__offset12b_pos__amdgpu_no_fine ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %unused = atomicrmw fmax ptr %gep, <2 x bfloat> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmin.ll index 9b682179aa279..d7656045f1d81 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmin.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fmin.ll @@ -299,7 +299,7 @@ define float @flat_agent_atomic_fmin_ret_f32__offset12b_pos__amdgpu_no_fine_grai ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fmin ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -464,7 +464,7 @@ define float @flat_agent_atomic_fmin_ret_f32__offset12b_neg__amdgpu_no_fine_grai ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr 
inbounds float, ptr %ptr, i64 -512 %result = atomicrmw fmin ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -753,7 +753,7 @@ define void @flat_agent_atomic_fmin_noret_f32__offset12b_pos__amdgpu_no_fine_gra ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fmin ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -918,7 +918,7 @@ define void @flat_agent_atomic_fmin_noret_f32__offset12b_neg__amdgpu_no_fine_gra ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %unused = atomicrmw fmin ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -1070,7 +1070,7 @@ define float @flat_system_atomic_fmin_ret_f32__offset12b_pos__amdgpu_no_fine_gra ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fmin ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -1221,7 +1221,7 @@ define void @flat_system_atomic_fmin_noret_f32__offset12b_pos__amdgpu_no_fine_gr ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fmin ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -1853,7 +1853,7 @@ define float @flat_agent_atomic_fmin_ret_f32__offset12b_pos__ftz__amdgpu_no_fine ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fmin ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -2018,7 +2018,7 @@ define float @flat_agent_atomic_fmin_ret_f32__offset12b_neg__ftz__amdgpu_no_fine ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %result = atomicrmw fmin ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -2307,7 +2307,7 @@ define void @flat_agent_atomic_fmin_noret_f32__offset12b_pos__ftz__amdgpu_no_fin ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fmin ptr %gep, float %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -2472,7 +2472,7 @@ define void @flat_agent_atomic_fmin_noret_f32__offset12b_neg__ftz__amdgpu_no_fin ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %unused = atomicrmw fmin ptr %gep, float %val 
syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -2624,7 +2624,7 @@ define float @flat_system_atomic_fmin_ret_f32__offset12b_pos__ftz__amdgpu_no_fin ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fmin ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret float %result } @@ -2775,7 +2775,7 @@ define void @flat_system_atomic_fmin_noret_f32__offset12b_pos__ftz__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fmin ptr %gep, float %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -8083,7 +8083,7 @@ define half @flat_agent_atomic_fmin_ret_f16__offset12b_pos__align4__amdgpu_no_fi ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr half, ptr %ptr, i64 1023 + %gep = getelementptr inbounds half, ptr %ptr, i64 1023 %result = atomicrmw fmin ptr %gep, half %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret half %result } @@ -8314,7 +8314,7 @@ define void @flat_agent_atomic_fmin_noret_f16__offset12b__align4_pos__amdgpu_no_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr half, ptr %ptr, i64 1023 + %gep = getelementptr inbounds half, ptr %ptr, i64 1023 %unused = atomicrmw fmin ptr %gep, half %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret void } @@ -11309,7 +11309,7 @@ define bfloat @flat_agent_atomic_fmin_ret_bf16__offset12b_pos__align4__amdgpu_no ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v3 ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr bfloat, ptr %ptr, i64 1023 + %gep = getelementptr inbounds bfloat, ptr %ptr, i64 1023 %result = atomicrmw fmin ptr %gep, bfloat %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret bfloat %result } @@ -11588,7 +11588,7 @@ define void @flat_agent_atomic_fmin_noret_bf16__offset12b__align4_pos__amdgpu_no ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr bfloat, ptr %ptr, i64 1023 + %gep = getelementptr inbounds bfloat, ptr %ptr, i64 1023 %unused = atomicrmw fmin ptr %gep, bfloat %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0 ret void } @@ -12774,7 +12774,7 @@ define <2 x half> @flat_agent_atomic_fmin_ret_v2f16__offset12b_pos__amdgpu_no_fi ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %result = atomicrmw fmin ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x half> %result } @@ -13031,7 +13031,7 @@ define <2 x half> @flat_agent_atomic_fmin_ret_v2f16__offset12b_neg__amdgpu_no_fi ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 -512 
%result = atomicrmw fmin ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x half> %result } @@ -13492,7 +13492,7 @@ define void @flat_agent_atomic_fmin_noret_v2f16__offset12b_pos__amdgpu_no_fine_g ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %unused = atomicrmw fmin ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -13745,7 +13745,7 @@ define void @flat_agent_atomic_fmin_noret_v2f16__offset12b_neg__amdgpu_no_fine_g ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 -512 %unused = atomicrmw fmin ptr %gep, <2 x half> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -13987,7 +13987,7 @@ define <2 x half> @flat_system_atomic_fmin_ret_v2f16__offset12b_pos__amdgpu_no_f ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %result = atomicrmw fmin ptr %gep, <2 x half> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x half> %result } @@ -14224,7 +14224,7 @@ define void @flat_system_atomic_fmin_noret_v2f16__offset12b_pos__amdgpu_no_fine_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x half>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511 %unused = atomicrmw fmin ptr %gep, <2 x half> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -14925,7 +14925,7 @@ define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_no ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %result = atomicrmw fmin ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x bfloat> %result } @@ -15293,7 +15293,7 @@ define <2 x bfloat> @flat_agent_atomic_fmin_ret_v2bf16__offset12b_neg__amdgpu_no ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 -512 %result = atomicrmw fmin ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x bfloat> %result } @@ -15972,7 +15972,7 @@ define void @flat_agent_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %unused = atomicrmw fmin ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -16334,7 +16334,7 @@ define void @flat_agent_atomic_fmin_noret_v2bf16__offset12b_neg__amdgpu_no_fine_ ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, 
s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 -512 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 -512 %unused = atomicrmw fmin ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } @@ -16687,7 +16687,7 @@ define <2 x bfloat> @flat_system_atomic_fmin_ret_v2bf16__offset12b_pos__amdgpu_n ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %result = atomicrmw fmin ptr %gep, <2 x bfloat> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret <2 x bfloat> %result } @@ -17033,7 +17033,7 @@ define void @flat_system_atomic_fmin_noret_v2bf16__offset12b_pos__amdgpu_no_fine ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511 + %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511 %unused = atomicrmw fmin ptr %gep, <2 x bfloat> %val seq_cst, !amdgpu.no.fine.grained.memory !0 ret void } diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll index 98d7d259562b0..406e43a989fc3 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll @@ -403,7 +403,7 @@ define float @flat_agent_atomic_fsub_ret_f32__offset12b_pos(ptr %ptr, float %val ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fsub ptr %gep, float %val syncscope("agent") seq_cst ret float %result } @@ -621,7 +621,7 @@ define float @flat_agent_atomic_fsub_ret_f32__offset12b_neg(ptr %ptr, float %val ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %result = atomicrmw fsub ptr %gep, float %val syncscope("agent") seq_cst ret float %result } @@ -1000,7 +1000,7 @@ define void @flat_agent_atomic_fsub_noret_f32__offset12b_pos(ptr %ptr, float %va ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %unused = atomicrmw fsub ptr %gep, float %val syncscope("agent") seq_cst ret void } @@ -1212,7 +1212,7 @@ define void @flat_agent_atomic_fsub_noret_f32__offset12b_neg(ptr %ptr, float %va ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 -512 + %gep = getelementptr inbounds float, ptr %ptr, i64 -512 %unused = atomicrmw fsub ptr %gep, float %val syncscope("agent") seq_cst ret void } @@ -1415,7 +1415,7 @@ define float @flat_system_atomic_fsub_ret_f32__offset12b_pos(ptr %ptr, float %va ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr float, ptr %ptr, i64 511 + %gep = getelementptr inbounds float, ptr %ptr, i64 511 %result = atomicrmw fsub ptr %gep, float %val seq_cst ret float %result } @@ -1611,7 +1611,7 @@ define void 
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr float, ptr %ptr, i64 511
+ %gep = getelementptr inbounds float, ptr %ptr, i64 511
%unused = atomicrmw fsub ptr %gep, float %val seq_cst
ret void
}
@@ -2011,7 +2011,7 @@ define float @flat_agent_atomic_fsub_ret_f32__offset12b_pos__ftz(ptr %ptr, float
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr float, ptr %ptr, i64 511
+ %gep = getelementptr inbounds float, ptr %ptr, i64 511
%result = atomicrmw fsub ptr %gep, float %val syncscope("agent") seq_cst
ret float %result
}
@@ -2229,7 +2229,7 @@ define float @flat_agent_atomic_fsub_ret_f32__offset12b_neg__ftz(ptr %ptr, float
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr float, ptr %ptr, i64 -512
+ %gep = getelementptr inbounds float, ptr %ptr, i64 -512
%result = atomicrmw fsub ptr %gep, float %val syncscope("agent") seq_cst
ret float %result
}
@@ -2608,7 +2608,7 @@ define void @flat_agent_atomic_fsub_noret_f32__offset12b_pos__ftz(ptr %ptr, floa
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr float, ptr %ptr, i64 511
+ %gep = getelementptr inbounds float, ptr %ptr, i64 511
%unused = atomicrmw fsub ptr %gep, float %val syncscope("agent") seq_cst
ret void
}
@@ -2820,7 +2820,7 @@ define void @flat_agent_atomic_fsub_noret_f32__offset12b_neg__ftz(ptr %ptr, floa
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr float, ptr %ptr, i64 -512
+ %gep = getelementptr inbounds float, ptr %ptr, i64 -512
%unused = atomicrmw fsub ptr %gep, float %val syncscope("agent") seq_cst
ret void
}
@@ -3023,7 +3023,7 @@ define float @flat_system_atomic_fsub_ret_f32__offset12b_pos__ftz(ptr %ptr, floa
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr float, ptr %ptr, i64 511
+ %gep = getelementptr inbounds float, ptr %ptr, i64 511
%result = atomicrmw fsub ptr %gep, float %val seq_cst
ret float %result
}
@@ -3219,7 +3219,7 @@ define void @flat_system_atomic_fsub_noret_f32__offset12b_pos__ftz(ptr %ptr, flo
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr float, ptr %ptr, i64 511
+ %gep = getelementptr inbounds float, ptr %ptr, i64 511
%unused = atomicrmw fsub ptr %gep, float %val seq_cst
ret void
}
@@ -7569,7 +7569,7 @@ define void @flat_agent_atomic_fsub_noret_f16__offset12b_neg(ptr %ptr, half %val
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr half, ptr %ptr, i64 -1024
+ %gep = getelementptr inbounds half, ptr %ptr, i64 -1024
%unused = atomicrmw fsub ptr %gep, half %val syncscope("agent") seq_cst
ret void
}
@@ -7792,7 +7792,7 @@ define half @flat_agent_atomic_fsub_ret_f16__offset12b_pos__align4(ptr %ptr, hal
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr half, ptr %ptr, i64 1023
+ %gep = getelementptr inbounds half, ptr %ptr, i64 1023
%result = atomicrmw fsub ptr %gep, half %val syncscope("agent") seq_cst, align 4
ret half %result
}
@@ -8007,7 +8007,7 @@ define void @flat_agent_atomic_fsub_noret_f16__offset12b__align4_pos(ptr %ptr, h
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr half, ptr %ptr, i64 1023
+ %gep = getelementptr inbounds half, ptr %ptr, i64 1023
%unused = atomicrmw fsub ptr %gep, half %val syncscope("agent") seq_cst, align 4
ret void
}
@@ -10965,7 +10965,7 @@ define bfloat @flat_agent_atomic_fsub_ret_bf16__offset12b_pos__align4(ptr %ptr,
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v3
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr bfloat, ptr %ptr, i64 1023
+ %gep = getelementptr inbounds bfloat, ptr %ptr, i64 1023
%result = atomicrmw fsub ptr %gep, bfloat %val syncscope("agent") seq_cst, align 4
ret bfloat %result
}
@@ -11243,7 +11243,7 @@ define void @flat_agent_atomic_fsub_noret_bf16__offset12b__align4_pos(ptr %ptr,
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr bfloat, ptr %ptr, i64 1023
+ %gep = getelementptr inbounds bfloat, ptr %ptr, i64 1023
%unused = atomicrmw fsub ptr %gep, bfloat %val syncscope("agent") seq_cst, align 4
ret void
}
@@ -12393,7 +12393,7 @@ define <2 x half> @flat_agent_atomic_fsub_ret_v2f16__offset12b_pos(ptr %ptr, <2
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x half>, ptr %ptr, i64 511
+ %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511
%result = atomicrmw fsub ptr %gep, <2 x half> %val syncscope("agent") seq_cst
ret <2 x half> %result
}
@@ -12633,7 +12633,7 @@ define <2 x half> @flat_agent_atomic_fsub_ret_v2f16__offset12b_neg(ptr %ptr, <2
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x half>, ptr %ptr, i64 -512
+ %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 -512
%result = atomicrmw fsub ptr %gep, <2 x half> %val syncscope("agent") seq_cst
ret <2 x half> %result
}
@@ -13056,7 +13056,7 @@ define void @flat_agent_atomic_fsub_noret_v2f16__offset12b_pos(ptr %ptr, <2 x ha
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x half>, ptr %ptr, i64 511
+ %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511
%unused = atomicrmw fsub ptr %gep, <2 x half> %val syncscope("agent") seq_cst
ret void
}
@@ -13290,7 +13290,7 @@ define void @flat_agent_atomic_fsub_noret_v2f16__offset12b_neg(ptr %ptr, <2 x ha
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x half>, ptr %ptr, i64 -512
+ %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 -512
%unused = atomicrmw fsub ptr %gep, <2 x half> %val syncscope("agent") seq_cst
ret void
}
@@ -13515,7 +13515,7 @@ define <2 x half> @flat_system_atomic_fsub_ret_v2f16__offset12b_pos(ptr %ptr, <2
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x half>, ptr %ptr, i64 511
+ %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511
%result = atomicrmw fsub ptr %gep, <2 x half> %val seq_cst
ret <2 x half> %result
}
@@ -13733,7 +13733,7 @@ define void @flat_system_atomic_fsub_noret_v2f16__offset12b_pos(ptr %ptr, <2 x h
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x half>, ptr %ptr, i64 511
+ %gep = getelementptr inbounds <2 x half>, ptr %ptr, i64 511
%unused = atomicrmw fsub ptr %gep, <2 x half> %val seq_cst
ret void
}
@@ -14434,7 +14434,7 @@ define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16__offset12b_pos(ptr %ptr,
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511
+ %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511
%result = atomicrmw fsub ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst
ret <2 x bfloat> %result
}
@@ -14802,7 +14802,7 @@ define <2 x bfloat> @flat_agent_atomic_fsub_ret_v2bf16__offset12b_neg(ptr %ptr,
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 -512
+ %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 -512
%result = atomicrmw fsub ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst
ret <2 x bfloat> %result
}
@@ -15481,7 +15481,7 @@ define void @flat_agent_atomic_fsub_noret_v2bf16__offset12b_pos(ptr %ptr, <2 x b
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511
+ %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511
%unused = atomicrmw fsub ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst
ret void
}
@@ -15843,7 +15843,7 @@ define void @flat_agent_atomic_fsub_noret_v2bf16__offset12b_neg(ptr %ptr, <2 x b
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 -512
+ %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 -512
%unused = atomicrmw fsub ptr %gep, <2 x bfloat> %val syncscope("agent") seq_cst
ret void
}
@@ -16196,7 +16196,7 @@ define <2 x bfloat> @flat_system_atomic_fsub_ret_v2bf16__offset12b_pos(ptr %ptr,
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511
+ %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511
%result = atomicrmw fsub ptr %gep, <2 x bfloat> %val seq_cst
ret <2 x bfloat> %result
}
@@ -16542,7 +16542,7 @@ define void @flat_system_atomic_fsub_noret_v2bf16__offset12b_pos(ptr %ptr, <2 x
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %gep = getelementptr <2 x bfloat>, ptr %ptr, i64 511
+ %gep = getelementptr inbounds <2 x bfloat>, ptr %ptr, i64 511
%unused = atomicrmw fsub ptr %gep, <2 x bfloat> %val seq_cst
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
index e674b57aae3ef..7d29d8d395d1d 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
@@ -47,7 +47,7 @@ define amdgpu_kernel void @atomic_add_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: buffer_wbinvl1_vol
; GCN3-NEXT: s_endpgm
entry:
- %gep = getelementptr i32, ptr %out, i32 4
+ %gep = getelementptr inbounds i32, ptr %out, i32 4
%val = atomicrmw add ptr %gep, i32 %in syncscope("agent") seq_cst
ret void
}
@@ -96,7 +96,7 @@ define amdgpu_kernel void
@atomic_add_i32_max_offset(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 1023 + %gep = getelementptr inbounds i32, ptr %out, i32 1023 %val = atomicrmw volatile add ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -147,7 +147,7 @@ define amdgpu_kernel void @atomic_add_i32_max_offset_p1(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 1024 + %gep = getelementptr inbounds i32, ptr %out, i32 1024 %val = atomicrmw volatile add ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -205,7 +205,7 @@ define amdgpu_kernel void @atomic_add_i32_ret_offset(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile add ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -267,8 +267,8 @@ define amdgpu_kernel void @atomic_add_i32_addr64_offset(ptr %out, i32 %in, i64 % ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile add ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -338,8 +338,8 @@ define amdgpu_kernel void @atomic_add_i32_ret_addr64_offset(ptr %out, ptr %out2, ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile add ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -495,7 +495,7 @@ define amdgpu_kernel void @atomic_add_i32_addr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile add ptr %ptr, i32 %in syncscope("agent") seq_cst ret void } @@ -561,7 +561,7 @@ define amdgpu_kernel void @atomic_add_i32_ret_addr64(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile add ptr %ptr, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -611,7 +611,7 @@ define amdgpu_kernel void @atomic_and_i32_offset(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -669,7 +669,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_offset(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -731,8 +731,8 @@ define amdgpu_kernel void @atomic_and_i32_addr64_offset(ptr %out, i32 %in, i64 % ; GCN3-NEXT: buffer_wbinvl1_vol ; 
GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -802,8 +802,8 @@ define amdgpu_kernel void @atomic_and_i32_ret_addr64_offset(ptr %out, ptr %out2, ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -959,7 +959,7 @@ define amdgpu_kernel void @atomic_and_i32_addr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile and ptr %ptr, i32 %in syncscope("agent") seq_cst ret void } @@ -1025,7 +1025,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_addr64(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile and ptr %ptr, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -1075,7 +1075,7 @@ define amdgpu_kernel void @atomic_sub_i32_offset(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -1133,7 +1133,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_offset(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -1195,8 +1195,8 @@ define amdgpu_kernel void @atomic_sub_i32_addr64_offset(ptr %out, i32 %in, i64 % ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -1266,8 +1266,8 @@ define amdgpu_kernel void @atomic_sub_i32_ret_addr64_offset(ptr %out, ptr %out2, ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -1423,7 +1423,7 @@ define amdgpu_kernel void @atomic_sub_i32_addr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile sub ptr %ptr, i32 %in syncscope("agent") seq_cst ret 
void } @@ -1489,7 +1489,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_addr64(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile sub ptr %ptr, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -1536,7 +1536,7 @@ define amdgpu_kernel void @atomic_max_i32_offset(ptr %out, i32 %in) { ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst ret void } @@ -1594,7 +1594,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_offset(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -1653,8 +1653,8 @@ define amdgpu_kernel void @atomic_max_i32_addr64_offset(ptr %out, i32 %in, i64 % ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst ret void } @@ -1724,8 +1724,8 @@ define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr %out, ptr %out2, ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -1875,7 +1875,7 @@ define amdgpu_kernel void @atomic_max_i32_addr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile max ptr %ptr, i32 %in syncscope("workgroup") seq_cst ret void } @@ -1941,7 +1941,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile max ptr %ptr, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -1988,7 +1988,7 @@ define amdgpu_kernel void @atomic_umax_i32_offset(ptr %out, i32 %in) { ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst ret void } @@ -2046,7 +2046,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_offset(ptr %out, ptr %out2, i32 % ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret 
void @@ -2105,8 +2105,8 @@ define amdgpu_kernel void @atomic_umax_i32_addr64_offset(ptr %out, i32 %in, i64 ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst ret void } @@ -2176,8 +2176,8 @@ define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr %out, ptr %out2 ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -2327,7 +2327,7 @@ define amdgpu_kernel void @atomic_umax_i32_addr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile umax ptr %ptr, i32 %in syncscope("workgroup") seq_cst ret void } @@ -2393,7 +2393,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr %out, ptr %out2, i32 % ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile umax ptr %ptr, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -2440,7 +2440,7 @@ define amdgpu_kernel void @atomic_min_i32_offset(ptr %out, i32 %in) { ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst ret void } @@ -2498,7 +2498,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_offset(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -2557,8 +2557,8 @@ define amdgpu_kernel void @atomic_min_i32_addr64_offset(ptr %out, i32 %in, i64 % ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst ret void } @@ -2628,8 +2628,8 @@ define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr %out, ptr %out2, ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -2779,7 +2779,7 @@ define amdgpu_kernel void @atomic_min_i32_addr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm 
entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile min ptr %ptr, i32 %in syncscope("workgroup") seq_cst ret void } @@ -2845,7 +2845,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile min ptr %ptr, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -2892,7 +2892,7 @@ define amdgpu_kernel void @atomic_umin_i32_offset(ptr %out, i32 %in) { ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst ret void } @@ -2950,7 +2950,7 @@ define amdgpu_kernel void @atomic_umin_i32_ret_offset(ptr %out, ptr %out2, i32 % ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -3009,8 +3009,8 @@ define amdgpu_kernel void @atomic_umin_i32_addr64_offset(ptr %out, i32 %in, i64 ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst ret void } @@ -3080,8 +3080,8 @@ define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(ptr %out, ptr %out2 ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -3231,7 +3231,7 @@ define amdgpu_kernel void @atomic_umin_i32_addr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: s_waitcnt lgkmcnt(0) ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile umin ptr %ptr, i32 %in syncscope("workgroup") seq_cst ret void } @@ -3297,7 +3297,7 @@ define amdgpu_kernel void @atomic_umin_i32_ret_addr64(ptr %out, ptr %out2, i32 % ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile umin ptr %ptr, i32 %in syncscope("workgroup") seq_cst store i32 %val, ptr %out2 ret void @@ -3347,7 +3347,7 @@ define amdgpu_kernel void @atomic_or_i32_offset(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -3405,7 +3405,7 @@ define amdgpu_kernel void @atomic_or_i32_ret_offset(ptr %out, ptr %out2, i32 %in ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = 
getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -3467,8 +3467,8 @@ define amdgpu_kernel void @atomic_or_i32_addr64_offset(ptr %out, i32 %in, i64 %i ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -3538,8 +3538,8 @@ define amdgpu_kernel void @atomic_or_i32_ret_addr64_offset(ptr %out, ptr %out2, ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -3695,7 +3695,7 @@ define amdgpu_kernel void @atomic_or_i32_addr64(ptr %out, i32 %in, i64 %index) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile or ptr %ptr, i32 %in syncscope("agent") seq_cst ret void } @@ -3761,7 +3761,7 @@ define amdgpu_kernel void @atomic_or_i32_ret_addr64(ptr %out, ptr %out2, i32 %in ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile or ptr %ptr, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -3811,7 +3811,7 @@ define amdgpu_kernel void @atomic_xchg_i32_offset(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile xchg ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -3860,7 +3860,7 @@ define amdgpu_kernel void @atomic_xchg_f32_offset(ptr %out, float %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr float, ptr %out, i32 4 + %gep = getelementptr inbounds float, ptr %out, i32 4 %val = atomicrmw volatile xchg ptr %gep, float %in syncscope("agent") seq_cst ret void } @@ -3918,7 +3918,7 @@ define amdgpu_kernel void @atomic_xchg_i32_ret_offset(ptr %out, ptr %out2, i32 % ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile xchg ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -3980,8 +3980,8 @@ define amdgpu_kernel void @atomic_xchg_i32_addr64_offset(ptr %out, i32 %in, i64 ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile xchg ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -4051,8 +4051,8 @@ define amdgpu_kernel void @atomic_xchg_i32_ret_addr64_offset(ptr %out, ptr %out2 ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; 
GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile xchg ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -4208,7 +4208,7 @@ define amdgpu_kernel void @atomic_xchg_i32_addr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile xchg ptr %ptr, i32 %in syncscope("agent") seq_cst ret void } @@ -4274,7 +4274,7 @@ define amdgpu_kernel void @atomic_xchg_i32_ret_addr64(ptr %out, ptr %out2, i32 % ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile xchg ptr %ptr, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -4326,7 +4326,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i32_offset(ptr %out, i32 %in, i32 %old ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst ret void } @@ -4387,7 +4387,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i32_ret_offset(ptr %out, ptr %out2, i3 ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst %flag = extractvalue { i32, i1 } %val, 0 store i32 %flag, ptr %out2 @@ -4456,8 +4456,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i32_addr64_offset(ptr %out, i32 %in, i ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst ret void } @@ -4533,8 +4533,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64_offset(ptr %out, ptr %o ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst %flag = extractvalue { i32, i1 } %val, 0 store i32 %flag, ptr %out2 @@ -4701,7 +4701,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i32_addr64(ptr %out, i32 %in, i64 %ind ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = cmpxchg volatile ptr %ptr, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst ret void } @@ -4773,7 +4773,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64(ptr %out, ptr %out2, i3 ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = cmpxchg volatile ptr 
%ptr, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst %flag = extractvalue { i32, i1 } %val, 0 store i32 %flag, ptr %out2 @@ -4824,7 +4824,7 @@ define amdgpu_kernel void @atomic_xor_i32_offset(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -4882,7 +4882,7 @@ define amdgpu_kernel void @atomic_xor_i32_ret_offset(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -4944,8 +4944,8 @@ define amdgpu_kernel void @atomic_xor_i32_addr64_offset(ptr %out, i32 %in, i64 % ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -5015,8 +5015,8 @@ define amdgpu_kernel void @atomic_xor_i32_ret_addr64_offset(ptr %out, ptr %out2, ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -5172,7 +5172,7 @@ define amdgpu_kernel void @atomic_xor_i32_addr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile xor ptr %ptr, i32 %in syncscope("agent") seq_cst ret void } @@ -5238,7 +5238,7 @@ define amdgpu_kernel void @atomic_xor_i32_ret_addr64(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile xor ptr %ptr, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -5291,7 +5291,7 @@ define amdgpu_kernel void @atomic_load_i32_offset(ptr %in, ptr %out) { ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %in, i32 4 + %gep = getelementptr inbounds i32, ptr %in, i32 4 %val = load atomic i32, ptr %gep seq_cst, align 4 store i32 %val, ptr %out ret void @@ -5404,8 +5404,8 @@ define amdgpu_kernel void @atomic_load_i32_addr64_offset(ptr %in, ptr %out, i64 ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %in, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %in, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = load atomic i32, ptr %gep seq_cst, align 4 store i32 %val, ptr %out ret void @@ -5466,7 +5466,7 @@ define amdgpu_kernel void @atomic_load_i32_addr64(ptr %in, ptr %out, i64 %index) ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %in, i64 %index + 
%ptr = getelementptr inbounds i32, ptr %in, i64 %index %val = load atomic i32, ptr %ptr seq_cst, align 4 store i32 %val, ptr %out ret void @@ -5510,7 +5510,7 @@ define amdgpu_kernel void @atomic_store_i32_offset(i32 %in, ptr %out) { ; GCN3-NEXT: flat_store_dword v[0:1], v2 offset:16 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 store atomic i32 %in, ptr %gep seq_cst, align 4 ret void } @@ -5600,8 +5600,8 @@ define amdgpu_kernel void @atomic_store_i32_addr64_offset(i32 %in, ptr %out, i64 ; GCN3-NEXT: flat_store_dword v[0:1], v2 offset:16 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 store atomic i32 %in, ptr %gep seq_cst, align 4 ret void } @@ -5649,7 +5649,7 @@ define amdgpu_kernel void @atomic_store_i32_addr64(i32 %in, ptr %out, i64 %index ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index store atomic i32 %in, ptr %ptr seq_cst, align 4 ret void } @@ -5701,7 +5701,7 @@ define amdgpu_kernel void @atomic_load_f32_offset(ptr %in, ptr %out) { ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr float, ptr %in, i32 4 + %gep = getelementptr inbounds float, ptr %in, i32 4 %val = load atomic float, ptr %gep seq_cst, align 4 store float %val, ptr %out ret void @@ -5814,8 +5814,8 @@ define amdgpu_kernel void @atomic_load_f32_addr64_offset(ptr %in, ptr %out, i64 ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr float, ptr %in, i64 %index - %gep = getelementptr float, ptr %ptr, i32 4 + %ptr = getelementptr inbounds float, ptr %in, i64 %index + %gep = getelementptr inbounds float, ptr %ptr, i32 4 %val = load atomic float, ptr %gep seq_cst, align 4 store float %val, ptr %out ret void @@ -5876,7 +5876,7 @@ define amdgpu_kernel void @atomic_load_f32_addr64(ptr %in, ptr %out, i64 %index) ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr float, ptr %in, i64 %index + %ptr = getelementptr inbounds float, ptr %in, i64 %index %val = load atomic float, ptr %ptr seq_cst, align 4 store float %val, ptr %out ret void @@ -5920,7 +5920,7 @@ define amdgpu_kernel void @atomic_store_f32_offset(float %in, ptr %out) { ; GCN3-NEXT: flat_store_dword v[0:1], v2 offset:16 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr float, ptr %out, i32 4 + %gep = getelementptr inbounds float, ptr %out, i32 4 store atomic float %in, ptr %gep seq_cst, align 4 ret void } @@ -6010,8 +6010,8 @@ define amdgpu_kernel void @atomic_store_f32_addr64_offset(float %in, ptr %out, i ; GCN3-NEXT: flat_store_dword v[0:1], v2 offset:16 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr float, ptr %out, i64 %index - %gep = getelementptr float, ptr %ptr, i32 4 + %ptr = getelementptr inbounds float, ptr %out, i64 %index + %gep = getelementptr inbounds float, ptr %ptr, i32 4 store atomic float %in, ptr %gep seq_cst, align 4 ret void } @@ -6059,7 +6059,7 @@ define amdgpu_kernel void @atomic_store_f32_addr64(float %in, ptr %out, i64 %ind ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr float, ptr %out, i64 %index + %ptr = getelementptr inbounds float, ptr %out, i64 %index store atomic float %in, ptr %ptr 
seq_cst, align 4 ret void } @@ -6111,7 +6111,7 @@ define amdgpu_kernel void @atomic_load_i8_offset(ptr %in, ptr %out) { ; GCN3-NEXT: flat_store_byte v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i8, ptr %in, i64 16 + %gep = getelementptr inbounds i8, ptr %in, i64 16 %val = load atomic i8, ptr %gep seq_cst, align 1 store i8 %val, ptr %out ret void @@ -6221,8 +6221,8 @@ define amdgpu_kernel void @atomic_load_i8_addr64_offset(ptr %in, ptr %out, i64 % ; GCN3-NEXT: flat_store_byte v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i8, ptr %in, i64 %index - %gep = getelementptr i8, ptr %ptr, i64 16 + %ptr = getelementptr inbounds i8, ptr %in, i64 %index + %gep = getelementptr inbounds i8, ptr %ptr, i64 16 %val = load atomic i8, ptr %gep seq_cst, align 1 store i8 %val, ptr %out ret void @@ -6266,7 +6266,7 @@ define amdgpu_kernel void @atomic_store_i8_offset(i8 %in, ptr %out) { ; GCN3-NEXT: flat_store_byte v[0:1], v2 offset:16 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i8, ptr %out, i64 16 + %gep = getelementptr inbounds i8, ptr %out, i64 16 store atomic i8 %in, ptr %gep seq_cst, align 1 ret void } @@ -6353,8 +6353,8 @@ define amdgpu_kernel void @atomic_store_i8_addr64_offset(i8 %in, ptr %out, i64 % ; GCN3-NEXT: flat_store_byte v[0:1], v2 offset:16 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i8, ptr %out, i64 %index - %gep = getelementptr i8, ptr %ptr, i64 16 + %ptr = getelementptr inbounds i8, ptr %out, i64 %index + %gep = getelementptr inbounds i8, ptr %ptr, i64 16 store atomic i8 %in, ptr %gep seq_cst, align 1 ret void } @@ -6406,7 +6406,7 @@ define amdgpu_kernel void @atomic_load_i16_offset(ptr %in, ptr %out) { ; GCN3-NEXT: flat_store_short v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i16, ptr %in, i64 8 + %gep = getelementptr inbounds i16, ptr %in, i64 8 %val = load atomic i16, ptr %gep seq_cst, align 2 store i16 %val, ptr %out ret void @@ -6519,8 +6519,8 @@ define amdgpu_kernel void @atomic_load_i16_addr64_offset(ptr %in, ptr %out, i64 ; GCN3-NEXT: flat_store_short v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i16, ptr %in, i64 %index - %gep = getelementptr i16, ptr %ptr, i64 8 + %ptr = getelementptr inbounds i16, ptr %in, i64 %index + %gep = getelementptr inbounds i16, ptr %ptr, i64 8 %val = load atomic i16, ptr %gep seq_cst, align 2 store i16 %val, ptr %out ret void @@ -6564,7 +6564,7 @@ define amdgpu_kernel void @atomic_store_i16_offset(i16 %in, ptr %out) { ; GCN3-NEXT: flat_store_short v[0:1], v2 offset:16 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i16, ptr %out, i64 8 + %gep = getelementptr inbounds i16, ptr %out, i64 8 store atomic i16 %in, ptr %gep seq_cst, align 2 ret void } @@ -6654,8 +6654,8 @@ define amdgpu_kernel void @atomic_store_i16_addr64_offset(i16 %in, ptr %out, i64 ; GCN3-NEXT: flat_store_short v[0:1], v2 offset:16 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i16, ptr %out, i64 %index - %gep = getelementptr i16, ptr %ptr, i64 8 + %ptr = getelementptr inbounds i16, ptr %out, i64 %index + %gep = getelementptr inbounds i16, ptr %ptr, i64 8 store atomic i16 %in, ptr %gep seq_cst, align 2 ret void } @@ -6698,7 +6698,7 @@ define amdgpu_kernel void @atomic_store_f16_offset(half %in, ptr %out) { ; GCN3-NEXT: flat_store_short v[0:1], v2 offset:16 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr half, ptr %out, i64 8 + %gep = getelementptr inbounds half, ptr %out, i64 8 store atomic half %in, ptr %gep seq_cst, align 2 ret void } @@ -6774,7 +6774,7 @@ define amdgpu_kernel void 
@atomic_store_bf16_offset(bfloat %in, ptr %out) { ; GCN3-NEXT: v_mov_b32_e32 v2, s2 ; GCN3-NEXT: flat_store_short v[0:1], v2 ; GCN3-NEXT: s_endpgm - %gep = getelementptr bfloat, ptr %out, i64 8 + %gep = getelementptr inbounds bfloat, ptr %out, i64 8 store atomic bfloat %in, ptr %out seq_cst, align 2 ret void } @@ -6860,7 +6860,7 @@ define amdgpu_kernel void @atomic_inc_i32_offset(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -6909,7 +6909,7 @@ define amdgpu_kernel void @atomic_inc_i32_max_offset(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 1023 + %gep = getelementptr inbounds i32, ptr %out, i32 1023 %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -6960,7 +6960,7 @@ define amdgpu_kernel void @atomic_inc_i32_max_offset_p1(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 1024 + %gep = getelementptr inbounds i32, ptr %out, i32 1024 %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -7018,7 +7018,7 @@ define amdgpu_kernel void @atomic_inc_i32_ret_offset(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -7080,8 +7080,8 @@ define amdgpu_kernel void @atomic_inc_i32_incr64_offset(ptr %out, i32 %in, i64 % ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -7151,8 +7151,8 @@ define amdgpu_kernel void @atomic_inc_i32_ret_incr64_offset(ptr %out, ptr %out2, ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -7308,7 +7308,7 @@ define amdgpu_kernel void @atomic_inc_i32_incr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile uinc_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst ret void } @@ -7374,7 +7374,7 @@ define amdgpu_kernel void @atomic_inc_i32_ret_incr64(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile uinc_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -7424,7 +7424,7 @@ define amdgpu_kernel void @atomic_dec_i32_offset(ptr %out, i32 %in) { ; 
GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -7473,7 +7473,7 @@ define amdgpu_kernel void @atomic_dec_i32_max_offset(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 1023 + %gep = getelementptr inbounds i32, ptr %out, i32 1023 %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -7524,7 +7524,7 @@ define amdgpu_kernel void @atomic_dec_i32_max_offset_p1(ptr %out, i32 %in) { ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 1024 + %gep = getelementptr inbounds i32, ptr %out, i32 1024 %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -7582,7 +7582,7 @@ define amdgpu_kernel void @atomic_dec_i32_ret_offset(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -7644,8 +7644,8 @@ define amdgpu_kernel void @atomic_dec_i32_decr64_offset(ptr %out, i32 %in, i64 % ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst ret void } @@ -7715,8 +7715,8 @@ define amdgpu_kernel void @atomic_dec_i32_ret_decr64_offset(ptr %out, ptr %out2, ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index - %gep = getelementptr i32, ptr %ptr, i32 4 + %ptr = getelementptr inbounds i32, ptr %out, i64 %index + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -7872,7 +7872,7 @@ define amdgpu_kernel void @atomic_dec_i32_decr64(ptr %out, i32 %in, i64 %index) ; GCN3-NEXT: buffer_wbinvl1_vol ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile udec_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst ret void } @@ -7938,7 +7938,7 @@ define amdgpu_kernel void @atomic_dec_i32_ret_decr64(ptr %out, ptr %out2, i32 %i ; GCN3-NEXT: flat_store_dword v[0:1], v2 ; GCN3-NEXT: s_endpgm entry: - %ptr = getelementptr i32, ptr %out, i64 %index + %ptr = getelementptr inbounds i32, ptr %out, i64 %index %val = atomicrmw volatile udec_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst store i32 %val, ptr %out2 ret void @@ -7990,7 +7990,7 @@ define amdgpu_kernel void @atomic_load_f16_offset(ptr %in, ptr %out) { ; GCN3-NEXT: v_mov_b32_e32 v1, s3 ; GCN3-NEXT: flat_store_short v[0:1], v2 ; GCN3-NEXT: s_endpgm - %gep = getelementptr half, ptr %in, i64 8 + %gep = getelementptr inbounds half, ptr %in, i64 8 %val = load atomic half, ptr %gep seq_cst, align 2 store half %val, ptr %out ret void @@ -8089,7 +8089,7 @@ define amdgpu_kernel void @atomic_load_bf16_offset(ptr %in, ptr %out) { ; GCN3-NEXT: v_mov_b32_e32 v1, s3 ; 
GCN3-NEXT:    flat_store_short v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
-  %gep = getelementptr bfloat, ptr %in, i64 8
+  %gep = getelementptr inbounds bfloat, ptr %in, i64 8
   %val = load atomic bfloat, ptr %gep seq_cst, align 2
   store bfloat %val, ptr %out
   ret void
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
index 1311560715ddd..0e84c7295d29b 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
@@ -63,7 +63,7 @@ define void @flat_atomic_xchg_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw xchg ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -124,7 +124,7 @@ define i32 @flat_atomic_xchg_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw xchg ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -203,7 +203,7 @@ define amdgpu_gfx void @flat_atomic_xchg_i32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw xchg ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -282,7 +282,7 @@ define amdgpu_gfx i32 @flat_atomic_xchg_i32_ret_offset_scalar(ptr inreg %out, i3
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw xchg ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -315,7 +315,7 @@ define void @flat_atomic_xchg_i32_noret_offset__amdgpu_no_remote_memory(ptr %out
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw xchg ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -348,7 +348,7 @@ define i32 @flat_atomic_xchg_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw xchg ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -413,7 +413,7 @@ define void @flat_atomic_xchg_f32_noret_offset(ptr %out, float %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr float, ptr %out, i32 4
+  %gep = getelementptr inbounds float, ptr %out, i32 4
   %tmp0 = atomicrmw xchg ptr %gep, float %in seq_cst
   ret void
 }
@@ -474,7 +474,7 @@ define float @flat_atomic_xchg_f32_ret_offset(ptr %out, float %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr float, ptr %out, i32 4
+  %gep = getelementptr inbounds float, ptr %out, i32 4
   %result = atomicrmw xchg ptr %gep, float %in seq_cst
   ret float %result
 }
@@ -553,7 +553,7 @@ define amdgpu_gfx void @flat_atomic_xchg_f32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr float, ptr %out, i32 4
+  %gep = getelementptr inbounds float, ptr %out, i32 4
   %tmp0 = atomicrmw xchg ptr %gep, float %in seq_cst
   ret void
 }
@@ -632,7 +632,7 @@ define amdgpu_gfx float @flat_atomic_xchg_f32_ret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr float, ptr %out, i32 4
+  %gep = getelementptr inbounds float, ptr %out, i32 4
   %result = atomicrmw xchg ptr %gep, float %in seq_cst
   ret float %result
 }
@@ -665,7 +665,7 @@ define void @flat_atomic_xchg_f32_noret_offset__amdgpu_no_remote_memory(ptr %out
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr float, ptr %out, i64 4
+  %gep = getelementptr inbounds float, ptr %out, i64 4
   %tmp0 = atomicrmw xchg ptr %gep, float %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -698,7 +698,7 @@ define float @flat_atomic_xchg_f32_ret_offset__amdgpu_no_remote_memory(ptr %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr float, ptr %out, i64 4
+  %gep = getelementptr inbounds float, ptr %out, i64 4
   %result = atomicrmw xchg ptr %gep, float %in seq_cst, !amdgpu.no.remote.memory !0
   ret float %result
 }
@@ -763,7 +763,7 @@ define void @flat_atomic_add_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw add ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -824,7 +824,7 @@ define i32 @flat_atomic_add_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw add ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -903,7 +903,7 @@ define amdgpu_gfx void @flat_atomic_add_i32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw add ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -982,7 +982,7 @@ define amdgpu_gfx i32 @flat_atomic_add_i32_ret_offset_scalar(ptr inreg %out, i32
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw add ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -1015,7 +1015,7 @@ define void @flat_atomic_add_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw add ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -1048,7 +1048,7 @@ define i32 @flat_atomic_add_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw add ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -1113,7 +1113,7 @@ define void @flat_atomic_sub_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw sub ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -1174,7 +1174,7 @@ define i32 @flat_atomic_sub_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw sub ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -1253,7 +1253,7 @@ define amdgpu_gfx void @flat_atomic_sub_i32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw sub ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -1332,7 +1332,7 @@ define amdgpu_gfx i32 @flat_atomic_sub_i32_ret_offset_scalar(ptr inreg %out, i32
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw sub ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -1365,7 +1365,7 @@ define void @flat_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw sub ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -1398,7 +1398,7 @@ define i32 @flat_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw sub ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -1463,7 +1463,7 @@ define void @flat_atomic_and_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw and ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -1524,7 +1524,7 @@ define i32 @flat_atomic_and_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw and ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -1603,7 +1603,7 @@ define amdgpu_gfx void @flat_atomic_and_i32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw and ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -1682,7 +1682,7 @@ define amdgpu_gfx i32 @flat_atomic_and_i32_ret_offset_scalar(ptr inreg %out, i32
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw and ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -1715,7 +1715,7 @@ define void @flat_atomic_and_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw and ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -1748,7 +1748,7 @@ define i32 @flat_atomic_and_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw and ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -1897,7 +1897,7 @@ define void @flat_atomic_nand_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw nand ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -2046,7 +2046,7 @@ define i32 @flat_atomic_nand_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    v_mov_b32_e32 v0, v3
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw nand ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -2203,7 +2203,7 @@ define amdgpu_gfx void @flat_atomic_nand_i32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw nand ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -2368,7 +2368,7 @@ define amdgpu_gfx i32 @flat_atomic_nand_i32_ret_offset_scalar(ptr inreg %out, i3
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw nand ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -2443,7 +2443,7 @@ define void @flat_atomic_nand_i32_noret_offset__amdgpu_no_remote_memory(ptr %out
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw nand ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -2519,7 +2519,7 @@ define i32 @flat_atomic_nand_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    v_mov_b32_e32 v0, v3
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw nand ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -2584,7 +2584,7 @@ define void @flat_atomic_or_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw or ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -2645,7 +2645,7 @@ define i32 @flat_atomic_or_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw or ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -2724,7 +2724,7 @@ define amdgpu_gfx void @flat_atomic_or_i32_noret_offset_scalar(ptr inreg %out, i
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw or ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -2803,7 +2803,7 @@ define amdgpu_gfx i32 @flat_atomic_or_i32_ret_offset_scalar(ptr inreg %out, i32
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw or ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -2836,7 +2836,7 @@ define void @flat_atomic_or_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw or ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -2869,7 +2869,7 @@ define i32 @flat_atomic_or_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i32
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw or ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -2934,7 +2934,7 @@ define void @flat_atomic_xor_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw xor ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -2995,7 +2995,7 @@ define i32 @flat_atomic_xor_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw xor ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -3074,7 +3074,7 @@ define amdgpu_gfx void @flat_atomic_xor_i32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw xor ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -3153,7 +3153,7 @@ define amdgpu_gfx i32 @flat_atomic_xor_i32_ret_offset_scalar(ptr inreg %out, i32
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw xor ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -3186,7 +3186,7 @@ define void @flat_xor_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %i
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw xor ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -3219,7 +3219,7 @@ define i32 @flat_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw xor ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -3362,7 +3362,7 @@ define void @flat_atomic_max_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw max ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -3505,7 +3505,7 @@ define i32 @flat_atomic_max_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    v_mov_b32_e32 v0, v3
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw max ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -3656,7 +3656,7 @@ define amdgpu_gfx void @flat_atomic_max_i32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw max ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -3815,7 +3815,7 @@ define amdgpu_gfx i32 @flat_atomic_max_i32_ret_offset_scalar(ptr inreg %out, i32
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw max ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -3909,8 +3909,8 @@ define amdgpu_kernel void @atomic_max_i32_addr64_offset(ptr %out, i32 %in, i32 %
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i32, ptr %out, i32 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
+  %ptr = getelementptr inbounds i32, ptr %out, i32 %index
+  %gep = getelementptr inbounds i32, ptr %ptr, i32 4
   %tmp0 = atomicrmw max ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -4019,8 +4019,8 @@ define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr %out, ptr %out2,
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i32, ptr %out, i32 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
+  %ptr = getelementptr inbounds i32, ptr %out, i32 %index
+  %gep = getelementptr inbounds i32, ptr %ptr, i32 4
   %tmp0 = atomicrmw max ptr %gep, i32 %in seq_cst
   store i32 %tmp0, ptr %out2
   ret void
@@ -4111,7 +4111,7 @@ define amdgpu_kernel void @atomic_max_i32_addr64(ptr %out, i32 %in, i32 %index)
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i32, ptr %out, i32 %index
+  %ptr = getelementptr inbounds i32, ptr %out, i32 %index
   %tmp0 = atomicrmw max ptr %ptr, i32 %in seq_cst
   ret void
 }
@@ -4216,7 +4216,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr %out, ptr %out2, i32 %i
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i32, ptr %out, i32 %index
+  %ptr = getelementptr inbounds i32, ptr %out, i32 %index
   %tmp0 = atomicrmw max ptr %ptr, i32 %in seq_cst
   store i32 %tmp0, ptr %out2
   ret void
@@ -4289,7 +4289,7 @@ define void @flat_max_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %i
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw max ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -4362,7 +4362,7 @@ define i32 @flat_atomic_max_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    v_mov_b32_e32 v0, v3
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw max ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -4505,7 +4505,7 @@ define void @flat_atomic_umax_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw umax ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -4648,7 +4648,7 @@ define i32 @flat_atomic_umax_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    v_mov_b32_e32 v0, v3
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw umax ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -4799,7 +4799,7 @@ define amdgpu_gfx void @flat_atomic_umax_i32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw umax ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -4958,7 +4958,7 @@ define amdgpu_gfx i32 @flat_atomic_umax_i32_ret_offset_scalar(ptr inreg %out, i3
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw umax ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -5052,8 +5052,8 @@ define amdgpu_kernel void @atomic_umax_i32_addr64_offset(ptr %out, i32 %in, i32
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i32, ptr %out, i32 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
+  %ptr = getelementptr inbounds i32, ptr %out, i32 %index
+  %gep = getelementptr inbounds i32, ptr %ptr, i32 4
   %tmp0 = atomicrmw umax ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -5162,8 +5162,8 @@ define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr %out, ptr %out2
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i32, ptr %out, i32 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
+  %ptr = getelementptr inbounds i32, ptr %out, i32 %index
+  %gep = getelementptr inbounds i32, ptr %ptr, i32 4
   %tmp0 = atomicrmw umax ptr %gep, i32 %in seq_cst
   store i32 %tmp0, ptr %out2
   ret void
@@ -5269,7 +5269,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr %out, ptr %out2, i32 %
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i32, ptr %out, i32 %index
+  %ptr = getelementptr inbounds i32, ptr %out, i32 %index
   %tmp0 = atomicrmw umax ptr %ptr, i32 %in seq_cst
   store i32 %tmp0, ptr %out2
   ret void
@@ -5342,7 +5342,7 @@ define void @flat_umax_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw umax ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -5415,7 +5415,7 @@ define i32 @flat_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    v_mov_b32_e32 v0, v3
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw umax ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -5558,7 +5558,7 @@ define void @flat_atomic_umin_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw umin ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -5701,7 +5701,7 @@ define i32 @flat_atomic_umin_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    v_mov_b32_e32 v0, v3
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw umin ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -5852,7 +5852,7 @@ define amdgpu_gfx void @flat_atomic_umin_i32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw umin ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -6011,7 +6011,7 @@ define amdgpu_gfx i32 @flat_atomic_umin_i32_ret_offset_scalar(ptr inreg %out, i3
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw umin ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -6083,7 +6083,7 @@ define void @flat_umin_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw umin ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -6156,7 +6156,7 @@ define i32 @flat_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    v_mov_b32_e32 v0, v3
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw umin ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -6299,7 +6299,7 @@ define void @flat_atomic_min_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw min ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -6442,7 +6442,7 @@ define i32 @flat_atomic_min_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    v_mov_b32_e32 v0, v3
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw min ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -6593,7 +6593,7 @@ define amdgpu_gfx void @flat_atomic_min_i32_noret_offset_scalar(ptr inreg %out,
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw min ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -6752,7 +6752,7 @@ define amdgpu_gfx i32 @flat_atomic_min_i32_ret_offset_scalar(ptr inreg %out, i32
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[34:35]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw min ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -6846,8 +6846,8 @@ define amdgpu_kernel void @atomic_min_i32_addr64_offset(ptr %out, i32 %in, i32 %
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i32, ptr %out, i32 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
+  %ptr = getelementptr inbounds i32, ptr %out, i32 %index
+  %gep = getelementptr inbounds i32, ptr %ptr, i32 4
   %tmp0 = atomicrmw min ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -6956,8 +6956,8 @@ define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr %out, ptr %out2,
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i32, ptr %out, i32 %index
-  %gep = getelementptr i32, ptr %ptr, i32 4
+  %ptr = getelementptr inbounds i32, ptr %out, i32 %index
+  %gep = getelementptr inbounds i32, ptr %ptr, i32 4
   %tmp0 = atomicrmw min ptr %gep, i32 %in seq_cst
   store i32 %tmp0, ptr %out2
   ret void
@@ -7140,7 +7140,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr %out, ptr %out2, i32 %i
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i32, ptr %out, i32 %index
+  %ptr = getelementptr inbounds i32, ptr %out, i32 %index
   %tmp0 = atomicrmw min ptr %ptr, i32 %in seq_cst
   store i32 %tmp0, ptr %out2
   ret void
@@ -7213,7 +7213,7 @@ define void @flat_min_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %i
 ; GCN3-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw min ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -7286,7 +7286,7 @@ define i32 @flat_atomic_min_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
 ; GCN3-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN3-NEXT:    v_mov_b32_e32 v0, v3
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw min ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -7351,7 +7351,7 @@ define void @flat_atomic_uinc_wrap_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -7412,7 +7412,7 @@ define i32 @flat_atomic_uinc_wrap_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -7491,7 +7491,7 @@ define amdgpu_gfx void @flat_atomic_uinc_wrap_i32_noret_offset_scalar(ptr inreg
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -7570,7 +7570,7 @@ define amdgpu_gfx i32 @flat_atomic_uinc_wrap_i32_ret_offset_scalar(ptr inreg %ou
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -7603,7 +7603,7 @@ define void @flat_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -7636,7 +7636,7 @@ define i32 @flat_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory(ptr %o
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
@@ -7701,7 +7701,7 @@ define void @flat_atomic_udec_wrap_i32_noret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -7762,7 +7762,7 @@ define i32 @flat_atomic_udec_wrap_i32_ret_offset(ptr %out, i32 %in) {
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -7841,7 +7841,7 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_offset_scalar(ptr inreg
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %tmp0 = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst
   ret void
 }
@@ -7920,7 +7920,7 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_offset_scalar(ptr inreg %ou
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %result = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst
   ret i32 %result
 }
@@ -7953,7 +7953,7 @@ define void @flat_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %tmp0 = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret void
 }
@@ -7986,7 +7986,7 @@ define i32 @flat_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory(ptr %o
 ; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN3-NEXT:    buffer_wbinvl1_vol
 ; GCN3-NEXT:    s_setpc_b64 s[30:31]
-  %gep = getelementptr i32, ptr %out, i64 4
+  %gep = getelementptr inbounds i32, ptr %out, i64 4
   %result = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
   ret i32 %result
 }
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
index ffe0596a95e33..723234c3038d9 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
@@ -11810,7 +11810,7 @@ define amdgpu_kernel void @atomic_load_i64_offset(ptr %in, ptr %out) {
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %in, i64 4
+  %gep = getelementptr inbounds i64, ptr %in, i64 4
   %val = load atomic i64, ptr %gep seq_cst, align 8
   store i64 %val, ptr %out
   ret void
@@ -11920,8 +11920,8 @@ define amdgpu_kernel void @atomic_load_i64_addr64_offset(ptr %in, ptr %out, i64
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %in, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %in, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %val = load atomic i64, ptr %gep seq_cst, align 8
   store i64 %val, ptr %out
   ret void
@@ -11981,7 +11981,7 @@ define amdgpu_kernel void @atomic_load_i64_addr64(ptr %in, ptr %out, i64 %index)
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %in, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %in, i64 %index
   %val = load atomic i64, ptr %ptr seq_cst, align 8
   store i64 %val, ptr %out
   ret void
@@ -12025,7 +12025,7 @@ define amdgpu_kernel void @atomic_store_i64_offset(i64 %in, ptr %out) {
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   store atomic i64 %in, ptr %gep seq_cst, align 8
   ret void
 }
@@ -12119,8 +12119,8 @@ define amdgpu_kernel void @atomic_store_i64_addr64_offset(i64 %in, ptr %out, i64
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   store atomic i64 %in, ptr %gep seq_cst, align 8
   ret void
 }
@@ -13597,7 +13597,7 @@ define amdgpu_kernel void @atomic_load_f64_offset(ptr %in, ptr %out) {
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr double, ptr %in, i64 4
+  %gep = getelementptr inbounds double, ptr %in, i64 4
   %val = load atomic double, ptr %gep seq_cst, align 8
   store double %val, ptr %out
   ret void
@@ -13707,8 +13707,8 @@ define amdgpu_kernel void @atomic_load_f64_addr64_offset(ptr %in, ptr %out, i64
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr double, ptr %in, i64 %index
-  %gep = getelementptr double, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds double, ptr %in, i64 %index
+  %gep = getelementptr inbounds double, ptr %ptr, i64 4
   %val = load atomic double, ptr %gep seq_cst, align 8
   store double %val, ptr %out
   ret void
@@ -13812,7 +13812,7 @@ define amdgpu_kernel void @atomic_store_f64_offset(double %in, ptr %out) {
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr double, ptr %out, i64 4
+  %gep = getelementptr inbounds double, ptr %out, i64 4
   store atomic double %in, ptr %gep seq_cst, align 8
   ret void
 }
@@ -13906,8 +13906,8 @@ define amdgpu_kernel void @atomic_store_f64_addr64_offset(double %in, ptr %out,
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr double, ptr %out, i64 %index
-  %gep = getelementptr double, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds double, ptr %out, i64 %index
+  %gep = getelementptr inbounds double, ptr %ptr, i64 4
   store atomic double %in, ptr %gep seq_cst, align 8
   ret void
 }
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_noprivate.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_noprivate.ll
index 3c1bc95cc38f6..67ad8f3ad4784 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_noprivate.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_noprivate.ll
@@ -45,7 +45,7 @@ define amdgpu_kernel void @atomic_add_i64_offset(ptr %out, i64 %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile add ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -104,7 +104,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_offset(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile add ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -165,8 +165,8 @@ define amdgpu_kernel void @atomic_add_i64_addr64_offset(ptr %out, i64 %in, i64 %
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile add ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -230,8 +230,8 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(ptr %out, ptr %out2,
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile add ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -385,7 +385,7 @@ define amdgpu_kernel void @atomic_add_i64_addr64(ptr %out, i64 %in, i64 %index)
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile add ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -445,7 +445,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile add ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -493,7 +493,7 @@ define amdgpu_kernel void @atomic_and_i64_offset(ptr %out, i64 %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -552,7 +552,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -613,8 +613,8 @@ define amdgpu_kernel void @atomic_and_i64_addr64_offset(ptr %out, i64 %in, i64 %
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -678,8 +678,8 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2,
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -833,7 +833,7 @@ define amdgpu_kernel void @atomic_and_i64_addr64(ptr %out, i64 %in, i64 %index)
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile and ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -893,7 +893,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile and ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -941,7 +941,7 @@ define amdgpu_kernel void @atomic_sub_i64_offset(ptr %out, i64 %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -1000,7 +1000,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -1061,8 +1061,8 @@ define amdgpu_kernel void @atomic_sub_i64_addr64_offset(ptr %out, i64 %in, i64 %
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -1126,8 +1126,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2,
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -1281,7 +1281,7 @@ define amdgpu_kernel void @atomic_sub_i64_addr64(ptr %out, i64 %in, i64 %index)
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile sub ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -1341,7 +1341,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile sub ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -1387,7 +1387,7 @@ define amdgpu_kernel void @atomic_max_i64_offset(ptr %out, i64 %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -1446,7 +1446,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -1505,8 +1505,8 @@ define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr %out, i64 %in, i64 %
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -1570,8 +1570,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -1721,7 +1721,7 @@ define amdgpu_kernel void @atomic_max_i64_addr64(ptr %out, i64 %in, i64 %index)
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile max ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -1781,7 +1781,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile max ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -1827,7 +1827,7 @@ define amdgpu_kernel void @atomic_umax_i64_offset(ptr %out, i64 %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -1886,7 +1886,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -1945,8 +1945,8 @@ define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr %out, i64 %in, i64
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -2010,8 +2010,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -2161,7 +2161,7 @@ define amdgpu_kernel void @atomic_umax_i64_addr64(ptr %out, i64 %in, i64 %index)
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile umax ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -2221,7 +2221,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile umax ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -2267,7 +2267,7 @@ define amdgpu_kernel void @atomic_min_i64_offset(ptr %out, i64 %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -2326,7 +2326,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -2385,8 +2385,8 @@ define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr %out, i64 %in, i64 %
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -2450,8 +2450,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -2601,7 +2601,7 @@ define amdgpu_kernel void @atomic_min_i64_addr64(ptr %out, i64 %in, i64 %index)
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile min ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -2661,7 +2661,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile min ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -2707,7 +2707,7 @@ define amdgpu_kernel void @atomic_umin_i64_offset(ptr %out, i64 %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -2766,7 +2766,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -2825,8 +2825,8 @@ define amdgpu_kernel void @atomic_umin_i64_addr64_offset(ptr %out, i64 %in, i64
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -2890,8 +2890,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -3041,7 +3041,7 @@ define amdgpu_kernel void @atomic_umin_i64_addr64(ptr %out, i64 %in, i64 %index)
 ; GFX12-NEXT:    global_inv scope:SCOPE_SE
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile umin ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -3101,7 +3101,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile umin ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -3149,7 +3149,7 @@ define amdgpu_kernel void @atomic_or_i64_offset(ptr %out, i64 %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -3208,7 +3208,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -3269,8 +3269,8 @@ define amdgpu_kernel void @atomic_or_i64_addr64_offset(ptr %out, i64 %in, i64 %i
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -3334,8 +3334,8 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2,
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -3489,7 +3489,7 @@ define amdgpu_kernel void @atomic_or_i64_addr64(ptr %out, i64 %in, i64 %index) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile or ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -3549,7 +3549,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile or ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -3597,7 +3597,7 @@ define amdgpu_kernel void @atomic_xchg_i64_offset(ptr %out, i64 %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile xchg ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -3644,7 +3644,7 @@ define amdgpu_kernel void @atomic_xchg_f64_offset(ptr %out, double %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr double, ptr %out, i64 4
+  %gep = getelementptr inbounds double, ptr %out, i64 4
   %tmp0 = atomicrmw volatile xchg ptr %gep, double %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -3691,7 +3691,7 @@ define amdgpu_kernel void @atomic_xchg_pointer_offset(ptr %out, ptr %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr ptr, ptr %out, i32 4
+  %gep = getelementptr inbounds ptr, ptr %out, i32 4
   %val = atomicrmw volatile xchg ptr %gep, ptr %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -3750,7 +3750,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr %out, ptr %out2, i64 %
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile xchg ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -3811,8 +3811,8 @@ define amdgpu_kernel void @atomic_xchg_i64_addr64_offset(ptr %out, i64 %in, i64
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile xchg ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -3876,8 +3876,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr %out, ptr %out2
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile xchg ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -4031,7 +4031,7 @@ define amdgpu_kernel void @atomic_xchg_i64_addr64(ptr %out, i64 %in, i64 %index)
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile xchg ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -4091,7 +4091,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr %out, ptr %out2, i64 %
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile xchg ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -4139,7 +4139,7 @@ define amdgpu_kernel void @atomic_xor_i64_offset(ptr %out, i64 %in) {
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -4198,7 +4198,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -4259,8 +4259,8 @@ define amdgpu_kernel void @atomic_xor_i64_addr64_offset(ptr %out, i64 %in, i64 %
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -4324,8 +4324,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2,
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -4479,7 +4479,7 @@ define amdgpu_kernel void @atomic_xor_i64_addr64(ptr %out, i64 %in, i64 %index)
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile xor ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -4539,7 +4539,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %tmp0 = atomicrmw volatile xor ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0
   store i64 %tmp0, ptr %out2
   ret void
@@ -4590,7 +4590,7 @@ define amdgpu_kernel void @atomic_load_i64_offset(ptr %in, ptr %out) {
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %in, i64 4
+  %gep = getelementptr inbounds i64, ptr %in, i64 4
   %val = load atomic i64, ptr %gep seq_cst, align 8
   store i64 %val, ptr %out
   ret void
@@ -4700,8 +4700,8 @@ define amdgpu_kernel void @atomic_load_i64_addr64_offset(ptr %in, ptr %out, i64
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %in, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %in, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %val = load atomic i64, ptr %gep seq_cst, align 8
   store i64 %val, ptr %out
   ret void
@@ -4761,7 +4761,7 @@ define amdgpu_kernel void @atomic_load_i64_addr64(ptr %in, ptr %out, i64 %index)
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %in, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %in, i64 %index
   %val = load atomic i64, ptr %ptr seq_cst, align 8
   store i64 %val, ptr %out
   ret void
@@ -4805,7 +4805,7 @@ define amdgpu_kernel void @atomic_store_i64_offset(i64 %in, ptr %out) {
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   store atomic i64 %in, ptr %gep seq_cst, align 8
   ret void
 }
@@ -4899,8 +4899,8 @@ define amdgpu_kernel void @atomic_store_i64_addr64_offset(i64 %in, ptr %out, i64
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   store atomic i64 %in, ptr %gep seq_cst, align 8
   ret void
 }
@@ -4952,7 +4952,7 @@ define amdgpu_kernel void @atomic_store_i64_addr64(i64 %in, ptr %out, i64 %index
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1] scope:SCOPE_SYS
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   store atomic i64 %in, ptr %ptr seq_cst, align 8
   ret void
 }
@@ -5008,7 +5008,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_offset(ptr %out, i64 %in, i64 %old
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -5064,7 +5064,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_soffset(ptr %out, i64 %in, i64 %ol
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 9000
+  %gep = getelementptr inbounds i64, ptr %out, i64 9000
   %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -5124,7 +5124,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr %out, ptr %out2, i6
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr i64, ptr %out, i64 4
+  %gep = getelementptr inbounds i64, ptr %out, i64 4
   %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   %extract0 = extractvalue { i64, i1 } %val, 0
   store i64 %extract0, ptr %out2
@@ -5186,8 +5186,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_addr64_offset(ptr %out, i64 %in, i
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -5259,8 +5259,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr %out, ptr %o
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
-  %gep = getelementptr i64, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 4
   %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   %extract0 = extractvalue { i64, i1 } %val, 0
   store i64 %extract0, ptr %out2
@@ -5426,7 +5426,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_addr64(ptr %out, i64 %in, i64 %ind
 ; GFX12-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %val = cmpxchg volatile ptr %ptr, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -5494,7 +5494,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr %out, ptr %out2, i6
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr i64, ptr %out, i64 %index
+  %ptr = getelementptr inbounds i64, ptr %out, i64 %index
   %val = cmpxchg volatile ptr %ptr, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   %extract0 = extractvalue { i64, i1 } %val, 0
   store i64 %extract0, ptr %out2
@@ -5546,7 +5546,7 @@ define amdgpu_kernel void @atomic_load_f64_offset(ptr %in, ptr %out) {
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %gep = getelementptr double, ptr %in, i64 4
+  %gep = getelementptr inbounds double, ptr %in, i64 4
   %val = load atomic double, ptr %gep seq_cst, align 8, !noalias.addrspace !0
   store double %val, ptr %out
   ret void
@@ -5656,8 +5656,8 @@ define amdgpu_kernel void @atomic_load_f64_addr64_offset(ptr %in, ptr %out, i64
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr double, ptr %in, i64 %index
-  %gep = getelementptr double, ptr %ptr, i64 4
+  %ptr = getelementptr inbounds double, ptr %in, i64 %index
+  %gep = getelementptr inbounds double, ptr %ptr, i64 4
   %val = load atomic double, ptr %gep seq_cst, align 8, !noalias.addrspace !0
   store double %val, ptr %out
   ret void
@@ -5717,7 +5717,7 @@ define amdgpu_kernel void @atomic_load_f64_addr64(ptr %in, ptr %out, i64 %index)
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT:    s_endpgm
 entry:
-  %ptr = getelementptr double, ptr %in, i64 %index
+  %ptr = getelementptr inbounds double, ptr %in, i64 %index
   %val = load atomic double, ptr %ptr seq_cst, align 8, !noalias.addrspace !0
   store double %val, ptr %out
   ret void
@@ -5761,7 +5761,7 @@ define amdgpu_kernel void @atomic_store_f64_offset(double %in, ptr %out) {
 ; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX12-NEXT:    s_endpgm
 entry:
- %gep = getelementptr double, ptr %out, i64 4 + %gep = getelementptr inbounds double, ptr %out, i64 4 store atomic double %in, ptr %gep seq_cst, align 8, !noalias.addrspace !0 ret void } @@ -5855,8 +5855,8 @@ define amdgpu_kernel void @atomic_store_f64_addr64_offset(double %in, ptr %out, ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1] offset:32 scope:SCOPE_SYS ; GFX12-NEXT: s_endpgm entry: - %ptr = getelementptr double, ptr %out, i64 %index - %gep = getelementptr double, ptr %ptr, i64 4 + %ptr = getelementptr inbounds double, ptr %out, i64 %index + %gep = getelementptr inbounds double, ptr %ptr, i64 4 store atomic double %in, ptr %gep seq_cst, align 8, !noalias.addrspace !0 ret void } @@ -5908,7 +5908,7 @@ define amdgpu_kernel void @atomic_store_f64_addr64(double %in, ptr %out, i64 %in ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1] scope:SCOPE_SYS ; GFX12-NEXT: s_endpgm entry: - %ptr = getelementptr double, ptr %out, i64 %index + %ptr = getelementptr inbounds double, ptr %out, i64 %index store atomic double %in, ptr %ptr seq_cst, align 8, !noalias.addrspace !0 ret void } @@ -5955,7 +5955,7 @@ define amdgpu_kernel void @atomic_inc_i64_offset(ptr %out, i64 %in) { ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: s_endpgm entry: - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 ret void } @@ -6014,7 +6014,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %i ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1] ; GFX12-NEXT: s_endpgm entry: - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 store i64 %tmp0, ptr %out2 ret void @@ -6075,8 +6075,8 @@ define amdgpu_kernel void @atomic_inc_i64_incr64_offset(ptr %out, i64 %in, i64 % ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index - %gep = getelementptr i64, ptr %ptr, i64 4 + %ptr = getelementptr inbounds i64, ptr %out, i64 %index + %gep = getelementptr inbounds i64, ptr %ptr, i64 4 %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 ret void } @@ -6140,8 +6140,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2, ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1] ; GFX12-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index - %gep = getelementptr i64, ptr %ptr, i64 4 + %ptr = getelementptr inbounds i64, ptr %out, i64 %index + %gep = getelementptr inbounds i64, ptr %ptr, i64 4 %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 store i64 %tmp0, ptr %out2 ret void @@ -6295,7 +6295,7 @@ define amdgpu_kernel void @atomic_inc_i64_incr64(ptr %out, i64 %in, i64 %index) ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index + %ptr = getelementptr inbounds i64, ptr %out, i64 %index %tmp0 = atomicrmw volatile uinc_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 ret void } @@ -6355,7 +6355,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %i ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1] ; GFX12-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index + %ptr = getelementptr inbounds i64, ptr 
%out, i64 %index %tmp0 = atomicrmw volatile uinc_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 store i64 %tmp0, ptr %out2 ret void @@ -6403,7 +6403,7 @@ define amdgpu_kernel void @atomic_dec_i64_offset(ptr %out, i64 %in) { ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: s_endpgm entry: - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 ret void } @@ -6462,7 +6462,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %i ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1] ; GFX12-NEXT: s_endpgm entry: - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 store i64 %tmp0, ptr %out2 ret void @@ -6523,8 +6523,8 @@ define amdgpu_kernel void @atomic_dec_i64_decr64_offset(ptr %out, i64 %in, i64 % ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index - %gep = getelementptr i64, ptr %ptr, i64 4 + %ptr = getelementptr inbounds i64, ptr %out, i64 %index + %gep = getelementptr inbounds i64, ptr %ptr, i64 4 %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 ret void } @@ -6588,8 +6588,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2, ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1] ; GFX12-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index - %gep = getelementptr i64, ptr %ptr, i64 4 + %ptr = getelementptr inbounds i64, ptr %out, i64 %index + %gep = getelementptr inbounds i64, ptr %ptr, i64 4 %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 store i64 %tmp0, ptr %out2 ret void @@ -6743,7 +6743,7 @@ define amdgpu_kernel void @atomic_dec_i64_decr64(ptr %out, i64 %in, i64 %index) ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index + %ptr = getelementptr inbounds i64, ptr %out, i64 %index %tmp0 = atomicrmw volatile udec_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 ret void } @@ -6803,7 +6803,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %i ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1] ; GFX12-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index + %ptr = getelementptr inbounds i64, ptr %out, i64 %index %tmp0 = atomicrmw volatile udec_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst, !noalias.addrspace !0 store i64 %tmp0, ptr %out2 ret void diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system_noprivate.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system_noprivate.ll index fe47461ebf956..36fa4d4fe9018 100644 --- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system_noprivate.ll +++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system_noprivate.ll @@ -63,7 +63,7 @@ define void @flat_atomic_xchg_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw xchg ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -124,7 +124,7 @@ define i64 @flat_atomic_xchg_i64_ret_offset(ptr %out, 
i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw xchg ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -209,7 +209,7 @@ define amdgpu_gfx void @flat_atomic_xchg_i64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw xchg ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -294,7 +294,7 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_offset_scalar(ptr inreg %out, i6 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw xchg ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -327,7 +327,7 @@ define void @flat_atomic_xchg_i64_noret_offset__amdgpu_no_remote_memory(ptr %out ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw xchg ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -360,7 +360,7 @@ define i64 @flat_atomic_xchg_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw xchg ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -425,7 +425,7 @@ define void @flat_atomic_xchg_f64_noret_offset(ptr %out, double %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr double, ptr %out, i32 4 + %gep = getelementptr inbounds double, ptr %out, i32 4 %tmp0 = atomicrmw xchg ptr %gep, double %in seq_cst, !noalias.addrspace !1 ret void } @@ -486,7 +486,7 @@ define double @flat_atomic_xchg_f64_ret_offset(ptr %out, double %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr double, ptr %out, i32 4 + %gep = getelementptr inbounds double, ptr %out, i32 4 %result = atomicrmw xchg ptr %gep, double %in seq_cst, !noalias.addrspace !1 ret double %result } @@ -571,7 +571,7 @@ define amdgpu_gfx void @flat_atomic_xchg_f64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr double, ptr %out, i32 4 + %gep = getelementptr inbounds double, ptr %out, i32 4 %tmp0 = atomicrmw xchg ptr %gep, double %in seq_cst, !noalias.addrspace !1 ret void } @@ -656,7 +656,7 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr double, ptr %out, i32 4 + %gep = getelementptr inbounds double, ptr %out, i32 4 %result = atomicrmw xchg ptr %gep, double %in seq_cst, !noalias.addrspace !1 ret double %result } @@ -689,7 
+689,7 @@ define void @flat_atomic_xchg_f64_noret_offset__amdgpu_no_remote_memory(ptr %out ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr double, ptr %out, i64 4 + %gep = getelementptr inbounds double, ptr %out, i64 4 %tmp0 = atomicrmw xchg ptr %gep, double %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -722,7 +722,7 @@ define double @flat_atomic_xchg_f64_ret_offset__amdgpu_no_remote_memory(ptr %out ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr double, ptr %out, i64 4 + %gep = getelementptr inbounds double, ptr %out, i64 4 %result = atomicrmw xchg ptr %gep, double %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret double %result } @@ -787,7 +787,7 @@ define void @flat_atomic_add_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw add ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -848,7 +848,7 @@ define i64 @flat_atomic_add_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw add ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -933,7 +933,7 @@ define amdgpu_gfx void @flat_atomic_add_i64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw add ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -1018,7 +1018,7 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_offset_scalar(ptr inreg %out, i64 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw add ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -1051,7 +1051,7 @@ define void @flat_atomic_add_i64_noret_offset__amdgpu_no_remote_memory(ptr %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw add ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -1084,7 +1084,7 @@ define i64 @flat_atomic_add_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw add ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -1149,7 +1149,7 @@ define void @flat_atomic_sub_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = 
atomicrmw sub ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -1210,7 +1210,7 @@ define i64 @flat_atomic_sub_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw sub ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -1295,7 +1295,7 @@ define amdgpu_gfx void @flat_atomic_sub_i64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw sub ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -1380,7 +1380,7 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_offset_scalar(ptr inreg %out, i64 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw sub ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -1413,7 +1413,7 @@ define void @flat_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory(ptr %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw sub ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -1446,7 +1446,7 @@ define i64 @flat_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw sub ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -1511,7 +1511,7 @@ define void @flat_atomic_and_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw and ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -1572,7 +1572,7 @@ define i64 @flat_atomic_and_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw and ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -1657,7 +1657,7 @@ define amdgpu_gfx void @flat_atomic_and_i64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw and ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -1742,7 +1742,7 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_offset_scalar(ptr inreg %out, i64 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 
%result = atomicrmw and ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -1775,7 +1775,7 @@ define void @flat_atomic_and_i64_noret_offset__amdgpu_no_remote_memory(ptr %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw and ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -1808,7 +1808,7 @@ define i64 @flat_atomic_and_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw and ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -1987,7 +1987,7 @@ define void @flat_atomic_nand_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw nand ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -2170,7 +2170,7 @@ define i64 @flat_atomic_nand_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: v_mov_b32_e32 v0, v4 ; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw nand ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -2373,7 +2373,7 @@ define amdgpu_gfx void @flat_atomic_nand_i64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw nand ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -2576,7 +2576,7 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_offset_scalar(ptr inreg %out, i6 ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw nand ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -2666,7 +2666,7 @@ define void @flat_atomic_nand_i64_noret_offset__amdgpu_no_remote_memory(ptr %out ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw nand ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -2758,7 +2758,7 @@ define i64 @flat_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i ; GFX9-NEXT: v_mov_b32_e32 v0, v4 ; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw nand ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -2823,7 +2823,7 @@ define void @flat_atomic_or_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] 
- %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw or ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -2884,7 +2884,7 @@ define i64 @flat_atomic_or_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw or ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -2969,7 +2969,7 @@ define amdgpu_gfx void @flat_atomic_or_i64_noret_offset_scalar(ptr inreg %out, i ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw or ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -3054,7 +3054,7 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_offset_scalar(ptr inreg %out, i64 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw or ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -3087,7 +3087,7 @@ define void @flat_atomic_or_i64_noret_offset__amdgpu_no_remote_memory(ptr %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw or ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -3120,7 +3120,7 @@ define i64 @flat_atomic_or_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i64 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw or ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -3185,7 +3185,7 @@ define void @flat_atomic_xor_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw xor ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -3246,7 +3246,7 @@ define i64 @flat_atomic_xor_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw xor ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -3331,7 +3331,7 @@ define amdgpu_gfx void @flat_atomic_xor_i64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw xor ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -3416,7 +3416,7 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_offset_scalar(ptr inreg %out, i64 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] 
- %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw xor ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -3449,7 +3449,7 @@ define void @flat_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory(ptr %out, ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw xor ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -3482,7 +3482,7 @@ define i64 @flat_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw xor ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -3655,7 +3655,7 @@ define void @flat_atomic_max_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw max ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -3832,7 +3832,7 @@ define i64 @flat_atomic_max_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: v_mov_b32_e32 v0, v4 ; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw max ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -4041,7 +4041,7 @@ define amdgpu_gfx void @flat_atomic_max_i64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw max ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -4250,7 +4250,7 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_offset_scalar(ptr inreg %out, i64 ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw max ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -4356,8 +4356,8 @@ define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr %out, i64 %in, i64 % ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index - %gep = getelementptr i64, ptr %ptr, i64 4 + %ptr = getelementptr inbounds i64, ptr %out, i64 %index + %gep = getelementptr inbounds i64, ptr %ptr, i64 4 %tmp0 = atomicrmw max ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -4472,8 +4472,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2, ; GFX9-NEXT: flat_store_dwordx2 v[0:1], v[2:3] ; GFX9-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index - %gep = getelementptr i64, ptr %ptr, i64 4 + %ptr = getelementptr inbounds i64, ptr %out, i64 %index + %gep = getelementptr inbounds i64, ptr %ptr, i64 4 %tmp0 = atomicrmw max ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 store i64 %tmp0, ptr %out2 ret void @@ 
-4576,7 +4576,7 @@ define amdgpu_kernel void @atomic_max_i64_addr64(ptr %out, i64 %in, i64 %index) ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index + %ptr = getelementptr inbounds i64, ptr %out, i64 %index %tmp0 = atomicrmw max ptr %ptr, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -4687,7 +4687,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i ; GFX9-NEXT: flat_store_dwordx2 v[0:1], v[2:3] ; GFX9-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index + %ptr = getelementptr inbounds i64, ptr %out, i64 %index %tmp0 = atomicrmw max ptr %ptr, i64 %in seq_cst, !noalias.addrspace !1 store i64 %tmp0, ptr %out2 ret void @@ -4775,7 +4775,7 @@ define void @flat_atomic_max_i64_noret_offset__amdgpu_no_remote_memory(ptr %out, ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw max ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -4864,7 +4864,7 @@ define i64 @flat_atomic_max_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6 ; GFX9-NEXT: v_mov_b32_e32 v0, v4 ; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw max ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -5037,7 +5037,7 @@ define void @flat_atomic_umax_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw umax ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -5214,7 +5214,7 @@ define i64 @flat_atomic_umax_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: v_mov_b32_e32 v0, v4 ; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw umax ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -5423,7 +5423,7 @@ define amdgpu_gfx void @flat_atomic_umax_i64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw umax ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -5632,7 +5632,7 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_offset_scalar(ptr inreg %out, i6 ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw umax ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -5738,8 +5738,8 @@ define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr %out, i64 %in, i64 ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index - %gep = getelementptr i64, ptr %ptr, i64 4 + %ptr = getelementptr inbounds i64, ptr %out, i64 %index + %gep = getelementptr inbounds i64, ptr %ptr, i64 4 %tmp0 = 
atomicrmw umax ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -5854,8 +5854,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2 ; GFX9-NEXT: flat_store_dwordx2 v[0:1], v[2:3] ; GFX9-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index - %gep = getelementptr i64, ptr %ptr, i64 4 + %ptr = getelementptr inbounds i64, ptr %out, i64 %index + %gep = getelementptr inbounds i64, ptr %ptr, i64 4 %tmp0 = atomicrmw umax ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 store i64 %tmp0, ptr %out2 ret void @@ -5967,7 +5967,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 % ; GFX9-NEXT: flat_store_dwordx2 v[0:1], v[2:3] ; GFX9-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index + %ptr = getelementptr inbounds i64, ptr %out, i64 %index %tmp0 = atomicrmw umax ptr %ptr, i64 %in seq_cst, !noalias.addrspace !1 store i64 %tmp0, ptr %out2 ret void @@ -6055,7 +6055,7 @@ define void @flat_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory(ptr %out ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw umax ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -6144,7 +6144,7 @@ define i64 @flat_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i ; GFX9-NEXT: v_mov_b32_e32 v0, v4 ; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw umax ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -6317,7 +6317,7 @@ define void @flat_atomic_umin_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw umin ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -6494,7 +6494,7 @@ define i64 @flat_atomic_umin_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: v_mov_b32_e32 v0, v4 ; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw umin ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -6703,7 +6703,7 @@ define amdgpu_gfx void @flat_atomic_umin_i64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw umin ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -6912,7 +6912,7 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_offset_scalar(ptr inreg %out, i6 ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw umin ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -6999,7 +6999,7 @@ define void @flat_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory(ptr %out ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, 
s[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw umin ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -7088,7 +7088,7 @@ define i64 @flat_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i ; GFX9-NEXT: v_mov_b32_e32 v0, v4 ; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw umin ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -7261,7 +7261,7 @@ define void @flat_atomic_min_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw min ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -7438,7 +7438,7 @@ define i64 @flat_atomic_min_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: v_mov_b32_e32 v0, v4 ; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw min ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -7647,7 +7647,7 @@ define amdgpu_gfx void @flat_atomic_min_i64_noret_offset_scalar(ptr inreg %out, ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw min ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -7856,7 +7856,7 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_offset_scalar(ptr inreg %out, i64 ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[34:35] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw min ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -7962,8 +7962,8 @@ define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr %out, i64 %in, i64 % ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index - %gep = getelementptr i64, ptr %ptr, i64 4 + %ptr = getelementptr inbounds i64, ptr %out, i64 %index + %gep = getelementptr inbounds i64, ptr %ptr, i64 4 %tmp0 = atomicrmw min ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -8078,8 +8078,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2, ; GFX9-NEXT: flat_store_dwordx2 v[0:1], v[2:3] ; GFX9-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index - %gep = getelementptr i64, ptr %ptr, i64 4 + %ptr = getelementptr inbounds i64, ptr %out, i64 %index + %gep = getelementptr inbounds i64, ptr %ptr, i64 4 %tmp0 = atomicrmw min ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 store i64 %tmp0, ptr %out2 ret void @@ -8286,7 +8286,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i ; GFX9-NEXT: flat_store_dwordx2 v[0:1], v[2:3] ; GFX9-NEXT: s_endpgm entry: - %ptr = getelementptr i64, ptr %out, i64 %index + %ptr = getelementptr inbounds i64, ptr %out, i64 %index %tmp0 = atomicrmw min ptr %ptr, i64 %in seq_cst,
!noalias.addrspace !1 store i64 %tmp0, ptr %out2 ret void @@ -8374,7 +8374,7 @@ define void @flat_atomic_min_i64_noret_offset__amdgpu_no_remote_memory(ptr %out, ; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX9-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw min ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -8463,7 +8463,7 @@ define i64 @flat_atomic_min_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6 ; GFX9-NEXT: v_mov_b32_e32 v0, v4 ; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw min ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -8528,7 +8528,7 @@ define void @flat_atomic_uinc_wrap_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -8589,7 +8589,7 @@ define i64 @flat_atomic_uinc_wrap_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -8674,7 +8674,7 @@ define amdgpu_gfx void @flat_atomic_uinc_wrap_i64_noret_offset_scalar(ptr inreg ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -8759,7 +8759,7 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_offset_scalar(ptr inreg %ou ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -8792,7 +8792,7 @@ define void @flat_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -8825,7 +8825,7 @@ define i64 @flat_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr %o ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } @@ -8890,7 +8890,7 @@ define void @flat_atomic_udec_wrap_i64_noret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 
s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -8951,7 +8951,7 @@ define i64 @flat_atomic_udec_wrap_i64_ret_offset(ptr %out, i64 %in) { ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -9036,7 +9036,7 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i64_noret_offset_scalar(ptr inreg ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret void } @@ -9121,7 +9121,7 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_offset_scalar(ptr inreg %ou ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst, !noalias.addrspace !1 ret i64 %result } @@ -9154,7 +9154,7 @@ define void @flat_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %tmp0 = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret void } @@ -9187,7 +9187,7 @@ define i64 @flat_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr %o ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_setpc_b64 s[30:31] - %gep = getelementptr i64, ptr %out, i64 4 + %gep = getelementptr inbounds i64, ptr %out, i64 4 %result = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1 ret i64 %result } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll index 75d2f156bdd2c..985917988e919 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll @@ -31,7 +31,7 @@ define amdgpu_kernel void @lds_atomic_dec_ret_i32(ptr addrspace(1) %out, ptr add ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 42 ; GCN: ds_dec_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]] offset:16 define amdgpu_kernel void @lds_atomic_dec_ret_i32_offset(ptr addrspace(1) %out, ptr addrspace(3) %ptr) #0 { - %gep = getelementptr i32, ptr addrspace(3) %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr addrspace(3) %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %gep, i32 42, i32 0, i32 0, i1 false) store i32 %result, ptr addrspace(1) %out ret void @@ -57,7 +57,7 @@ define amdgpu_kernel void @lds_atomic_dec_noret_i32(ptr addrspace(3) %ptr) nounw ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 42 ; GCN: ds_dec_u32 v{{[0-9]+}}, [[K]] offset:16 define amdgpu_kernel void @lds_atomic_dec_noret_i32_offset(ptr addrspace(3) %ptr) nounwind { - %gep = getelementptr i32, ptr addrspace(3) %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr addrspace(3) %ptr, i32 4 %result = call i32 
@llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %gep, i32 42, i32 0, i32 0, i1 false) ret void } @@ -80,7 +80,7 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32(ptr addrspace(1) %out, ptr ; GFX9-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}} ; GFX9: global_atomic_dec v{{[0-9]+}}, [[ZERO]], [[K]], s{{\[[0-9]+:[0-9]+\]}} offset:16 glc{{$}} define amdgpu_kernel void @global_atomic_dec_ret_i32_offset(ptr addrspace(1) %out, ptr addrspace(1) %ptr) #0 { - %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr addrspace(1) %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %gep, i32 42, i32 0, i32 0, i1 false) store i32 %result, ptr addrspace(1) %out ret void @@ -104,7 +104,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32(ptr addrspace(1) %ptr) no ; GFX9-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}} ; GFX9: global_atomic_dec [[ZERO]], [[K]], s{{\[[0-9]+:[0-9]+\]}} offset:16{{$}} define amdgpu_kernel void @global_atomic_dec_noret_i32_offset(ptr addrspace(1) %ptr) nounwind { - %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr addrspace(1) %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %gep, i32 42, i32 0, i32 0, i1 false) ret void } @@ -115,9 +115,9 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset(ptr addrspace(1) % ; VI: flat_atomic_dec v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}} define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_addr64(ptr addrspace(1) %out, ptr addrspace(1) %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i32, ptr addrspace(1) %ptr, i32 %id - %out.gep = getelementptr i32, ptr addrspace(1) %out, i32 %id - %gep = getelementptr i32, ptr addrspace(1) %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i32, ptr addrspace(1) %ptr, i32 %id + %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i32 %id + %gep = getelementptr inbounds i32, ptr addrspace(1) %gep.tid, i32 5 %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %gep, i32 42, i32 0, i32 0, i1 false) store i32 %result, ptr addrspace(1) %out.gep ret void @@ -129,8 +129,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_addr64(ptr addrspace ; VI: flat_atomic_dec v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}} define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_addr64(ptr addrspace(1) %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i32, ptr addrspace(1) %ptr, i32 %id - %gep = getelementptr i32, ptr addrspace(1) %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i32, ptr addrspace(1) %ptr, i32 %id + %gep = getelementptr inbounds i32, ptr addrspace(1) %gep.tid, i32 5 %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %gep, i32 42, i32 0, i32 0, i1 false) ret void } @@ -149,7 +149,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32(ptr %out, ptr %ptr) #0 { ; CIVI: flat_atomic_dec v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}} ; GFX9: flat_atomic_dec v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:16 glc{{$}} define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset(ptr %out, ptr %ptr) #0 { - %gep = getelementptr i32, ptr %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %gep, i32 42, i32 0, i32 0, i1 false) store i32 %result, ptr %out ret void @@ -168,7 +168,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32(ptr %ptr) nounwind { ; CIVI: 
flat_atomic_dec v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}} ; GFX9: flat_atomic_dec v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:16{{$}} define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset(ptr %ptr) nounwind { - %gep = getelementptr i32, ptr %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %gep, i32 42, i32 0, i32 0, i1 false) ret void } @@ -179,9 +179,9 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset(ptr %ptr) nounwind { ; GFX9: flat_atomic_dec v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:20 glc{{$}} define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_addr64(ptr %out, ptr %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i32, ptr %ptr, i32 %id - %out.gep = getelementptr i32, ptr %out, i32 %id - %gep = getelementptr i32, ptr %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i32, ptr %ptr, i32 %id + %out.gep = getelementptr inbounds i32, ptr %out, i32 %id + %gep = getelementptr inbounds i32, ptr %gep.tid, i32 5 %result = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %gep, i32 42, i32 0, i32 0, i1 false) store i32 %result, ptr %out.gep ret void @@ -193,8 +193,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_addr64(ptr %out, ptr % ; GFX9: flat_atomic_dec v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:20{{$}} define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_addr64(ptr %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i32, ptr %ptr, i32 %id - %gep = getelementptr i32, ptr %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i32, ptr %ptr, i32 %id + %gep = getelementptr inbounds i32, ptr %gep.tid, i32 5 %result = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %gep, i32 42, i32 0, i32 0, i1 false) ret void } @@ -215,7 +215,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64(ptr %out, ptr %ptr) #0 { ; CIVI: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] glc{{$}} ; GFX9: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] offset:32 glc{{$}} define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset(ptr %out, ptr %ptr) #0 { - %gep = getelementptr i64, ptr %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr %gep, i64 42, i32 0, i32 0, i1 false) store i64 %result, ptr %out ret void @@ -236,7 +236,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64(ptr %ptr) nounwind { ; CIVI: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]{{\]$}} ; GFX9: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] offset:32{{$}} define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset(ptr %ptr) nounwind { - %gep = getelementptr i64, ptr %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr %gep, i64 42, i32 0, i32 0, i1 false) ret void } @@ -248,9 +248,9 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset(ptr %ptr) nounwind { ; GFX9: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] offset:40 glc{{$}} define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(ptr %out, ptr %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i64, ptr %ptr, i32 %id - %out.gep = getelementptr i64, ptr %out, i32 %id - %gep = getelementptr i64, ptr %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i64, ptr %ptr, i32 %id + %out.gep = getelementptr inbounds i64, ptr %out, 
i32 %id + %gep = getelementptr inbounds i64, ptr %gep.tid, i32 5 %result = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr %gep, i64 42, i32 0, i32 0, i1 false) store i64 %result, ptr %out.gep ret void @@ -263,8 +263,8 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(ptr %out, ptr % ; GFX9: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] offset:40{{$}} define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(ptr %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i64, ptr %ptr, i32 %id - %gep = getelementptr i64, ptr %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i64, ptr %ptr, i32 %id + %gep = getelementptr inbounds i64, ptr %gep.tid, i32 5 %result = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr %gep, i64 42, i32 0, i32 0, i1 false) ret void } @@ -280,7 +280,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(ptr %ptr) #0 define amdgpu_kernel void @atomic_dec_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 { %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1 %idx.0 = add nsw i32 %tid.x, 2 - %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds0, i32 0, i32 %idx.0 + %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds0, i32 0, i32 %idx.0 %val0 = call i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %arrayidx0, i32 9, i32 0, i32 0, i1 false) store i32 %idx.0, ptr addrspace(1) %add_use store i32 %val0, ptr addrspace(1) %out @@ -308,7 +308,7 @@ define amdgpu_kernel void @lds_atomic_dec_ret_i64(ptr addrspace(1) %out, ptr add ; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}} ; GCN: ds_dec_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, v[[[KLO]]:[[KHI]]] offset:32 define amdgpu_kernel void @lds_atomic_dec_ret_i64_offset(ptr addrspace(1) %out, ptr addrspace(3) %ptr) #0 { - %gep = getelementptr i64, ptr addrspace(3) %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr addrspace(3) %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3(ptr addrspace(3) %gep, i64 42, i32 0, i32 0, i1 false) store i64 %result, ptr addrspace(1) %out ret void @@ -334,7 +334,7 @@ define amdgpu_kernel void @lds_atomic_dec_noret_i64(ptr addrspace(3) %ptr) nounw ; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}} ; GCN: ds_dec_u64 v{{[0-9]+}}, v[[[KLO]]:[[KHI]]] offset:32{{$}} define amdgpu_kernel void @lds_atomic_dec_noret_i64_offset(ptr addrspace(3) %ptr) nounwind { - %gep = getelementptr i64, ptr addrspace(3) %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr addrspace(3) %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3(ptr addrspace(3) %gep, i64 42, i32 0, i32 0, i1 false) ret void } @@ -359,7 +359,7 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64(ptr addrspace(1) %out, ptr ; CIVI: buffer_atomic_dec_x2 v[[[KLO]]:[[KHI]]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:32 glc{{$}} ; GFX9: global_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v[[ZERO]], v[[[KLO]]:[[KHI]]], s{{\[[0-9]+:[0-9]+\]}} offset:32 glc{{$}} define amdgpu_kernel void @global_atomic_dec_ret_i64_offset(ptr addrspace(1) %out, ptr addrspace(1) %ptr) #0 { - %gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr addrspace(1) %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) %gep, i64 42, i32 0, i32 0, i1 false) store i64 %result, ptr addrspace(1) %out ret void @@ -383,7 +383,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64(ptr addrspace(1) %ptr) no ; CIVI: buffer_atomic_dec_x2 v[[[KLO]]:[[KHI]]], off,
s{{\[[0-9]+:[0-9]+\]}}, 0 offset:32{{$}} ; GFX9: global_atomic_dec_x2 v[[ZERO]], v[[[KLO]]:[[KHI]]], s{{\[[0-9]+:[0-9]+\]}} offset:32{{$}} define amdgpu_kernel void @global_atomic_dec_noret_i64_offset(ptr addrspace(1) %ptr) nounwind { - %gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr addrspace(1) %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) %gep, i64 42, i32 0, i32 0, i1 false) ret void } @@ -396,9 +396,9 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset(ptr addrspace(1) % ; VI: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] glc{{$}} define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_addr64(ptr addrspace(1) %out, ptr addrspace(1) %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i64, ptr addrspace(1) %ptr, i32 %id - %out.gep = getelementptr i64, ptr addrspace(1) %out, i32 %id - %gep = getelementptr i64, ptr addrspace(1) %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i64, ptr addrspace(1) %ptr, i32 %id + %out.gep = getelementptr inbounds i64, ptr addrspace(1) %out, i32 %id + %gep = getelementptr inbounds i64, ptr addrspace(1) %gep.tid, i32 5 %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) %gep, i64 42, i32 0, i32 0, i1 false) store i64 %result, ptr addrspace(1) %out.gep ret void @@ -412,8 +412,8 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_addr64(ptr addrspace ; VI: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]]{{$}} define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_addr64(ptr addrspace(1) %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i64, ptr addrspace(1) %ptr, i32 %id - %gep = getelementptr i64, ptr addrspace(1) %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i64, ptr addrspace(1) %ptr, i32 %id + %gep = getelementptr inbounds i64, ptr addrspace(1) %gep.tid, i32 5 %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) %gep, i64 42, i32 0, i32 0, i1 false) ret void } @@ -429,7 +429,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_addr64(ptr addrspa define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 { %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1 %idx.0 = add nsw i32 %tid.x, 2 - %arrayidx0 = getelementptr inbounds [512 x i64], ptr addrspace(3) @lds1, i32 0, i32 %idx.0 + %arrayidx0 = getelementptr inbounds [512 x i64], ptr addrspace(3) @lds1, i32 0, i32 %idx.0 %val0 = call i64 @llvm.amdgcn.atomic.dec.i64.p3(ptr addrspace(3) %arrayidx0, i64 9, i32 0, i32 0, i1 false) store i32 %idx.0, ptr addrspace(1) %add_use store i64 %val0, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll index b28405f4ff113..fe6d5832e6f1e 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll @@ -31,7 +31,7 @@ define amdgpu_kernel void @lds_atomic_inc_ret_i32(ptr addrspace(1) %out, ptr add ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 42 ; GCN: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]] offset:16 define amdgpu_kernel void @lds_atomic_inc_ret_i32_offset(ptr addrspace(1) %out, ptr addrspace(3) %ptr) #0 { - %gep = getelementptr i32, ptr addrspace(3) %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr addrspace(3) %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr
addrspace(3) %gep, i32 42, i32 0, i32 0, i1 false) store i32 %result, ptr addrspace(1) %out ret void @@ -57,7 +57,7 @@ define amdgpu_kernel void @lds_atomic_inc_noret_i32(ptr addrspace(3) %ptr) nounw ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 42 ; GCN: ds_inc_u32 v{{[0-9]+}}, [[K]] offset:16 define amdgpu_kernel void @lds_atomic_inc_noret_i32_offset(ptr addrspace(3) %ptr) nounwind { - %gep = getelementptr i32, ptr addrspace(3) %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr addrspace(3) %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) %gep, i32 42, i32 0, i32 0, i1 false) ret void } @@ -77,7 +77,7 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32(ptr addrspace(1) %out, ptr ; CIVI: buffer_atomic_inc [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16 glc{{$}} ; GFX9: global_atomic_inc v{{[0-9]+}}, v{{[0-9]+}}, [[K]], s{{\[[0-9]+:[0-9]+\]}} offset:16 glc{{$}} define amdgpu_kernel void @global_atomic_inc_ret_i32_offset(ptr addrspace(1) %out, ptr addrspace(1) %ptr) #0 { - %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr addrspace(1) %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %gep, i32 42, i32 0, i32 0, i1 false) store i32 %result, ptr addrspace(1) %out ret void @@ -97,7 +97,7 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32(ptr addrspace(1) %ptr) no ; CIVI: buffer_atomic_inc [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16{{$}} ; GFX9: global_atomic_inc v{{[0-9]+}}, [[K]], s{{\[[0-9]+:[0-9]+\]}} offset:16{{$}} define amdgpu_kernel void @global_atomic_inc_noret_i32_offset(ptr addrspace(1) %ptr) nounwind { - %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr addrspace(1) %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %gep, i32 42, i32 0, i32 0, i1 false) ret void } @@ -108,9 +108,9 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset(ptr addrspace(1) % ; VI: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}} define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_addr64(ptr addrspace(1) %out, ptr addrspace(1) %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i32, ptr addrspace(1) %ptr, i32 %id - %out.gep = getelementptr i32, ptr addrspace(1) %out, i32 %id - %gep = getelementptr i32, ptr addrspace(1) %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i32, ptr addrspace(1) %ptr, i32 %id + %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i32 %id + %gep = getelementptr inbounds i32, ptr addrspace(1) %gep.tid, i32 5 %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %gep, i32 42, i32 0, i32 0, i1 false) store i32 %result, ptr addrspace(1) %out.gep ret void @@ -122,8 +122,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_addr64(ptr addrspace ; VI: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}} define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_addr64(ptr addrspace(1) %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i32, ptr addrspace(1) %ptr, i32 %id - %gep = getelementptr i32, ptr addrspace(1) %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i32, ptr addrspace(1) %ptr, i32 %id + %gep = getelementptr inbounds i32, ptr addrspace(1) %gep.tid, i32 5 %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %gep, i32 42, i32 0, i32 0, i1 false) ret void } @@ -136,7 +136,7 @@ define amdgpu_kernel void 
@global_atomic_inc_noret_i32_offset_addr64(ptr addrspa define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i32(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 { %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1 %idx.0 = add nsw i32 %tid.x, 2 - %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds0, i32 0, i32 %idx.0 + %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds0, i32 0, i32 %idx.0 %val0 = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) %arrayidx0, i32 9, i32 0, i32 0, i1 false) store i32 %idx.0, ptr addrspace(1) %add_use store i32 %val0, ptr addrspace(1) %out @@ -158,7 +158,7 @@ define amdgpu_kernel void @lds_atomic_inc_ret_i64(ptr addrspace(1) %out, ptr add ; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}} ; GCN: ds_inc_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, v[[[KLO]]:[[KHI]]] offset:32 define amdgpu_kernel void @lds_atomic_inc_ret_i64_offset(ptr addrspace(1) %out, ptr addrspace(3) %ptr) #0 { - %gep = getelementptr i64, ptr addrspace(3) %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr addrspace(3) %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3(ptr addrspace(3) %gep, i64 42, i32 0, i32 0, i1 false) store i64 %result, ptr addrspace(1) %out ret void @@ -178,7 +178,7 @@ define amdgpu_kernel void @lds_atomic_inc_noret_i64(ptr addrspace(3) %ptr) nounw ; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}} ; GCN: ds_inc_u64 v{{[0-9]+}}, v[[[KLO]]:[[KHI]]] offset:32{{$}} define amdgpu_kernel void @lds_atomic_inc_noret_i64_offset(ptr addrspace(3) %ptr) nounwind { - %gep = getelementptr i64, ptr addrspace(3) %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr addrspace(3) %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3(ptr addrspace(3) %gep, i64 42, i32 0, i32 0, i1 false) ret void } @@ -202,7 +202,7 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64(ptr addrspace(1) %out, ptr ; CIVI: buffer_atomic_inc_x2 v[[[KLO]]:[[KHI]]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:32 glc{{$}} ; GFX9: global_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v[[ZERO]], v[[[KLO]]:[[KHI]]], s{{\[[0-9]+:[0-9]+\]}} offset:32 glc{{$}} define amdgpu_kernel void @global_atomic_inc_ret_i64_offset(ptr addrspace(1) %out, ptr addrspace(1) %ptr) #0 { - %gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr addrspace(1) %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) %gep, i64 42, i32 0, i32 0, i1 false) store i64 %result, ptr addrspace(1) %out ret void @@ -227,7 +227,7 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64(ptr addrspace(1) %ptr) no ; CIVI: buffer_atomic_inc_x2 v[[[KLO]]:[[KHI]]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:32{{$}} ; GFX9: global_atomic_inc_x2 v[[ZERO]], v[[[KLO]]:[[KHI]]], s{{\[[0-9]+:[0-9]+\]}} offset:32{{$}} define amdgpu_kernel void @global_atomic_inc_noret_i64_offset(ptr addrspace(1) %ptr) nounwind { - %gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr addrspace(1) %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) %gep, i64 42, i32 0, i32 0, i1 false) ret void } @@ -240,9 +240,9 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset(ptr addrspace(1) % ; VI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] glc{{$}} define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(ptr addrspace(1) %out, ptr addrspace(1) %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i64, ptr
addrspace(1) %ptr, i32 %id - %out.gep = getelementptr i64, ptr addrspace(1) %out, i32 %id - %gep = getelementptr i64, ptr addrspace(1) %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i64, ptr addrspace(1) %ptr, i32 %id + %out.gep = getelementptr inbounds i64, ptr addrspace(1) %out, i32 %id + %gep = getelementptr inbounds i64, ptr addrspace(1) %gep.tid, i32 5 %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) %gep, i64 42, i32 0, i32 0, i1 false) store i64 %result, ptr addrspace(1) %out.gep ret void @@ -256,8 +256,8 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(ptr addrspace ; VI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]]{{$}} define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_addr64(ptr addrspace(1) %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i64, ptr addrspace(1) %ptr, i32 %id - %gep = getelementptr i64, ptr addrspace(1) %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i64, ptr addrspace(1) %ptr, i32 %id + %gep = getelementptr inbounds i64, ptr addrspace(1) %gep.tid, i32 5 %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) %gep, i64 42, i32 0, i32 0, i1 false) ret void } @@ -276,7 +276,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32(ptr %out, ptr %ptr) #0 { ; CIVI: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}} ; GFX9: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:16 glc{{$}} define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset(ptr %out, ptr %ptr) #0 { - %gep = getelementptr i32, ptr %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %gep, i32 42, i32 0, i32 0, i1 false) store i32 %result, ptr %out ret void @@ -295,7 +295,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32(ptr %ptr) nounwind { ; CIVI: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}} ; GFX9: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:16{{$}} define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset(ptr %ptr) nounwind { - %gep = getelementptr i32, ptr %ptr, i32 4 + %gep = getelementptr inbounds i32, ptr %ptr, i32 4 %result = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %gep, i32 42, i32 0, i32 0, i1 false) ret void } @@ -306,9 +306,9 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset(ptr %ptr) nounwind { ; GFX9: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:20 glc{{$}} define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_addr64(ptr %out, ptr %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i32, ptr %ptr, i32 %id - %out.gep = getelementptr i32, ptr %out, i32 %id - %gep = getelementptr i32, ptr %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i32, ptr %ptr, i32 %id + %out.gep = getelementptr inbounds i32, ptr %out, i32 %id + %gep = getelementptr inbounds i32, ptr %gep.tid, i32 5 %result = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %gep, i32 42, i32 0, i32 0, i1 false) store i32 %result, ptr %out.gep ret void @@ -320,8 +320,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_addr64(ptr %out, ptr % ; GFX9: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:20{{$}} define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(ptr %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i32, ptr %ptr, i32 %id - %gep = getelementptr i32, ptr %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i32, ptr %ptr, i32 %id + %gep = getelementptr inbounds 
i32, ptr %gep.tid, i32 5 %result = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %gep, i32 42, i32 0, i32 0, i1 false) ret void } @@ -334,7 +334,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(ptr %ptr) #0 define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i64(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 { %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1 %idx.0 = add nsw i32 %tid.x, 2 - %arrayidx0 = getelementptr inbounds [512 x i64], ptr addrspace(3) @lds1, i32 0, i32 %idx.0 + %arrayidx0 = getelementptr inbounds [512 x i64], ptr addrspace(3) @lds1, i32 0, i32 %idx.0 %val0 = call i64 @llvm.amdgcn.atomic.inc.i64.p3(ptr addrspace(3) %arrayidx0, i64 9, i32 0, i32 0, i1 false) store i32 %idx.0, ptr addrspace(1) %add_use store i64 %val0, ptr addrspace(1) %out @@ -357,7 +357,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64(ptr %out, ptr %ptr) #0 { ; CIVI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] glc{{$}} ; GFX9: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] offset:32 glc{{$}} define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset(ptr %out, ptr %ptr) #0 { - %gep = getelementptr i64, ptr %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr %gep, i64 42, i32 0, i32 0, i1 false) store i64 %result, ptr %out ret void @@ -378,7 +378,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64(ptr %ptr) nounwind { ; CIVI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]{{\]$}} ; GFX9: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] offset:32{{$}} define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset(ptr %ptr) nounwind { - %gep = getelementptr i64, ptr %ptr, i32 4 + %gep = getelementptr inbounds i64, ptr %ptr, i32 4 %result = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr %gep, i64 42, i32 0, i32 0, i1 false) ret void } @@ -390,9 +390,9 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset(ptr %ptr) nounwind { ; GFX9: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] offset:40 glc{{$}} define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(ptr %out, ptr %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i64, ptr %ptr, i32 %id - %out.gep = getelementptr i64, ptr %out, i32 %id - %gep = getelementptr i64, ptr %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i64, ptr %ptr, i32 %id + %out.gep = getelementptr inbounds i64, ptr %out, i32 %id + %gep = getelementptr inbounds i64, ptr %gep.tid, i32 5 %result = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr %gep, i64 42, i32 0, i32 0, i1 false) store i64 %result, ptr %out.gep ret void @@ -405,8 +405,8 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(ptr %out, ptr % ; GFX9: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v[[[KLO]]:[[KHI]]] offset:40{{$}} define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_addr64(ptr %ptr) #0 { %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep.tid = getelementptr i64, ptr %ptr, i32 %id - %gep = getelementptr i64, ptr %gep.tid, i32 5 + %gep.tid = getelementptr inbounds i64, ptr %ptr, i32 %id + %gep = getelementptr inbounds i64, ptr %gep.tid, i32 5 %result = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr %gep, i64 42, i32 0, i32 0, i1 false) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-agent.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-agent.ll index
07ad8cb0c4a3d..afe1afb910efa 100644 --- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-agent.ll +++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-agent.ll @@ -3266,7 +3266,7 @@ define amdgpu_kernel void @flat_agent_monotonic_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") monotonic monotonic ret void } @@ -3535,7 +3535,7 @@ define amdgpu_kernel void @flat_agent_acquire_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acquire monotonic ret void } @@ -3797,7 +3797,7 @@ define amdgpu_kernel void @flat_agent_release_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") release monotonic ret void } @@ -4090,7 +4090,7 @@ define amdgpu_kernel void @flat_agent_acq_rel_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acq_rel monotonic ret void } @@ -4383,7 +4383,7 @@ define amdgpu_kernel void @flat_agent_seq_cst_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst monotonic ret void } @@ -4652,7 +4652,7 @@ define amdgpu_kernel void @flat_agent_monotonic_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") monotonic acquire ret void } @@ -4921,7 +4921,7 @@ define amdgpu_kernel void @flat_agent_acquire_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acquire acquire ret void } @@ -5214,7 +5214,7 @@ define amdgpu_kernel void @flat_agent_release_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") release acquire ret void } @@ -5507,7 +5507,7 @@ define amdgpu_kernel void @flat_agent_acq_rel_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acq_rel acquire ret void } @@ -5800,7 +5800,7 @@ define amdgpu_kernel void @flat_agent_seq_cst_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile 
ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst acquire ret void } @@ -6093,7 +6093,7 @@ define amdgpu_kernel void @flat_agent_monotonic_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") monotonic seq_cst ret void } @@ -6386,7 +6386,7 @@ define amdgpu_kernel void @flat_agent_acquire_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acquire seq_cst ret void } @@ -6679,7 +6679,7 @@ define amdgpu_kernel void @flat_agent_release_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") release seq_cst ret void } @@ -6972,7 +6972,7 @@ define amdgpu_kernel void @flat_agent_acq_rel_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acq_rel seq_cst ret void } @@ -7265,7 +7265,7 @@ define amdgpu_kernel void @flat_agent_seq_cst_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst ret void } @@ -7547,7 +7547,7 @@ define amdgpu_kernel void @flat_agent_monotonic_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") monotonic monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -7846,7 +7846,7 @@ define amdgpu_kernel void @flat_agent_acquire_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acquire monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8154,7 +8154,7 @@ define amdgpu_kernel void @flat_agent_release_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") release monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8481,7 +8481,7 @@ define amdgpu_kernel void @flat_agent_acq_rel_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acq_rel monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8808,7 +8808,7 @@ define amdgpu_kernel void 
@flat_agent_seq_cst_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9111,7 +9111,7 @@ define amdgpu_kernel void @flat_agent_monotonic_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") monotonic acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9410,7 +9410,7 @@ define amdgpu_kernel void @flat_agent_acquire_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acquire acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9737,7 +9737,7 @@ define amdgpu_kernel void @flat_agent_release_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") release acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -10064,7 +10064,7 @@ define amdgpu_kernel void @flat_agent_acq_rel_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acq_rel acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -10391,7 +10391,7 @@ define amdgpu_kernel void @flat_agent_seq_cst_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -10718,7 +10718,7 @@ define amdgpu_kernel void @flat_agent_monotonic_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") monotonic seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -11041,7 +11041,7 @@ define amdgpu_kernel void @flat_agent_acquire_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acquire seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -11368,7 +11368,7 @@ define amdgpu_kernel void @flat_agent_release_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile 
ptr %gep, i32 %old, i32 %in syncscope("agent") release seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -11695,7 +11695,7 @@ define amdgpu_kernel void @flat_agent_acq_rel_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") acq_rel seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -12022,7 +12022,7 @@ define amdgpu_kernel void @flat_agent_seq_cst_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -15321,7 +15321,7 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") monotonic monotonic ret void } @@ -15586,7 +15586,7 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acquire monotonic ret void } @@ -15848,7 +15848,7 @@ define amdgpu_kernel void @flat_agent_one_as_release_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") release monotonic ret void } @@ -16137,7 +16137,7 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acq_rel monotonic ret void } @@ -16426,7 +16426,7 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") seq_cst monotonic ret void } @@ -16691,7 +16691,7 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") monotonic acquire ret void } @@ -16956,7 +16956,7 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acquire acquire ret void } @@ -17245,7 +17245,7 @@ define 
amdgpu_kernel void @flat_agent_one_as_release_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") release acquire ret void } @@ -17534,7 +17534,7 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acq_rel acquire ret void } @@ -17823,7 +17823,7 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") seq_cst acquire ret void } @@ -18112,7 +18112,7 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") monotonic seq_cst ret void } @@ -18401,7 +18401,7 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acquire seq_cst ret void } @@ -18690,7 +18690,7 @@ define amdgpu_kernel void @flat_agent_one_as_release_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") release seq_cst ret void } @@ -18979,7 +18979,7 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acq_rel seq_cst ret void } @@ -19268,7 +19268,7 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") seq_cst seq_cst ret void } @@ -19550,7 +19550,7 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") monotonic monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -19859,7 +19859,7 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 
%val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acquire monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -20167,7 +20167,7 @@ define amdgpu_kernel void @flat_agent_one_as_release_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") release monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -20504,7 +20504,7 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acq_rel monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -20841,7 +20841,7 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") seq_cst monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -21154,7 +21154,7 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") monotonic acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -21463,7 +21463,7 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acquire acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -21800,7 +21800,7 @@ define amdgpu_kernel void @flat_agent_one_as_release_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") release acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -22137,7 +22137,7 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acq_rel acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -22474,7 +22474,7 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") seq_cst acquire %val0 = 
extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -22811,7 +22811,7 @@ define amdgpu_kernel void @flat_agent_one_as_monotonic_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") monotonic seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -23144,7 +23144,7 @@ define amdgpu_kernel void @flat_agent_one_as_acquire_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acquire seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -23481,7 +23481,7 @@ define amdgpu_kernel void @flat_agent_one_as_release_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") release seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -23818,7 +23818,7 @@ define amdgpu_kernel void @flat_agent_one_as_acq_rel_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") acq_rel seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -24155,7 +24155,7 @@ define amdgpu_kernel void @flat_agent_one_as_seq_cst_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent-one-as") seq_cst seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-singlethread.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-singlethread.ll index b88a10ab24a98..fbc69704c5faf 100644 --- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-singlethread.ll +++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-singlethread.ll @@ -2896,7 +2896,7 @@ define amdgpu_kernel void @flat_singlethread_monotonic_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") monotonic monotonic ret void } @@ -3134,7 +3134,7 @@ define amdgpu_kernel void @flat_singlethread_acquire_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acquire monotonic ret void } @@ -3372,7 +3372,7 @@ define amdgpu_kernel void @flat_singlethread_release_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, 
i32 %old, i32 %in syncscope("singlethread") release monotonic ret void } @@ -3610,7 +3610,7 @@ define amdgpu_kernel void @flat_singlethread_acq_rel_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acq_rel monotonic ret void } @@ -3848,7 +3848,7 @@ define amdgpu_kernel void @flat_singlethread_seq_cst_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") seq_cst monotonic ret void } @@ -4086,7 +4086,7 @@ define amdgpu_kernel void @flat_singlethread_monotonic_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") monotonic acquire ret void } @@ -4324,7 +4324,7 @@ define amdgpu_kernel void @flat_singlethread_acquire_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acquire acquire ret void } @@ -4562,7 +4562,7 @@ define amdgpu_kernel void @flat_singlethread_release_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") release acquire ret void } @@ -4800,7 +4800,7 @@ define amdgpu_kernel void @flat_singlethread_acq_rel_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acq_rel acquire ret void } @@ -5038,7 +5038,7 @@ define amdgpu_kernel void @flat_singlethread_seq_cst_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") seq_cst acquire ret void } @@ -5276,7 +5276,7 @@ define amdgpu_kernel void @flat_singlethread_monotonic_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") monotonic seq_cst ret void } @@ -5514,7 +5514,7 @@ define amdgpu_kernel void @flat_singlethread_acquire_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acquire seq_cst ret void } @@ -5752,7 +5752,7 @@ define amdgpu_kernel void @flat_singlethread_release_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr 
%out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") release seq_cst ret void } @@ -5990,7 +5990,7 @@ define amdgpu_kernel void @flat_singlethread_acq_rel_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acq_rel seq_cst ret void } @@ -6228,7 +6228,7 @@ define amdgpu_kernel void @flat_singlethread_seq_cst_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") seq_cst seq_cst ret void } @@ -6510,7 +6510,7 @@ define amdgpu_kernel void @flat_singlethread_monotonic_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") monotonic monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -6794,7 +6794,7 @@ define amdgpu_kernel void @flat_singlethread_acquire_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acquire monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -7078,7 +7078,7 @@ define amdgpu_kernel void @flat_singlethread_release_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") release monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -7362,7 +7362,7 @@ define amdgpu_kernel void @flat_singlethread_acq_rel_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acq_rel monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -7646,7 +7646,7 @@ define amdgpu_kernel void @flat_singlethread_seq_cst_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") seq_cst monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -7930,7 +7930,7 @@ define amdgpu_kernel void @flat_singlethread_monotonic_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") monotonic acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8214,7 +8214,7 @@ define amdgpu_kernel void @flat_singlethread_acquire_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, 
i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acquire acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8498,7 +8498,7 @@ define amdgpu_kernel void @flat_singlethread_release_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") release acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8782,7 +8782,7 @@ define amdgpu_kernel void @flat_singlethread_acq_rel_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acq_rel acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9066,7 +9066,7 @@ define amdgpu_kernel void @flat_singlethread_seq_cst_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") seq_cst acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9350,7 +9350,7 @@ define amdgpu_kernel void @flat_singlethread_monotonic_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") monotonic seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9634,7 +9634,7 @@ define amdgpu_kernel void @flat_singlethread_acquire_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acquire seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9918,7 +9918,7 @@ define amdgpu_kernel void @flat_singlethread_release_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") release seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -10202,7 +10202,7 @@ define amdgpu_kernel void @flat_singlethread_acq_rel_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") acq_rel seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -10486,7 +10486,7 @@ define amdgpu_kernel void @flat_singlethread_seq_cst_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val 
= cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread") seq_cst seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -13377,7 +13377,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_monotonic_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") monotonic monotonic ret void } @@ -13615,7 +13615,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acquire_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acquire monotonic ret void } @@ -13853,7 +13853,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_release_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") release monotonic ret void } @@ -14091,7 +14091,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acq_rel_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acq_rel monotonic ret void } @@ -14329,7 +14329,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_seq_cst_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") seq_cst monotonic ret void } @@ -14567,7 +14567,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_monotonic_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") monotonic acquire ret void } @@ -14805,7 +14805,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acquire_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acquire acquire ret void } @@ -15043,7 +15043,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_release_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") release acquire ret void } @@ -15281,7 +15281,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acq_rel_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acq_rel acquire ret 
void } @@ -15519,7 +15519,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_seq_cst_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") seq_cst acquire ret void } @@ -15757,7 +15757,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_monotonic_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") monotonic seq_cst ret void } @@ -15995,7 +15995,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acquire_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acquire seq_cst ret void } @@ -16233,7 +16233,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_release_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") release seq_cst ret void } @@ -16471,7 +16471,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acq_rel_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acq_rel seq_cst ret void } @@ -16709,7 +16709,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_seq_cst_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") seq_cst seq_cst ret void } @@ -16991,7 +16991,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_monotonic_monotonic_ret_cmpx ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") monotonic monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -17275,7 +17275,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acquire_monotonic_ret_cmpxch ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acquire monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -17559,7 +17559,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_release_monotonic_ret_cmpxch ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") release monotonic %val0 = extractvalue { i32, i1 } %val, 
0 store i32 %val0, ptr %out, align 4 @@ -17843,7 +17843,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acq_rel_monotonic_ret_cmpxch ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acq_rel monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -18127,7 +18127,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_seq_cst_monotonic_ret_cmpxch ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") seq_cst monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -18411,7 +18411,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_monotonic_acquire_ret_cmpxch ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") monotonic acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -18695,7 +18695,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acquire_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acquire acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -18979,7 +18979,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_release_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") release acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -19263,7 +19263,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acq_rel_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acq_rel acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -19547,7 +19547,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_seq_cst_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") seq_cst acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -19831,7 +19831,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_monotonic_seq_cst_ret_cmpxch ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") monotonic seq_cst %val0 = extractvalue { i32, i1 } %val, 0 
store i32 %val0, ptr %out, align 4 @@ -20115,7 +20115,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acquire_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acquire seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -20399,7 +20399,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_release_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") release seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -20683,7 +20683,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_acq_rel_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") acq_rel seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -20967,7 +20967,7 @@ define amdgpu_kernel void @flat_singlethread_one_as_seq_cst_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("singlethread-one-as") seq_cst seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-system.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-system.ll index 919fc3e8f4e4f..8d44c60aa77ae 100644 --- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-system.ll +++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-system.ll @@ -3310,7 +3310,7 @@ define amdgpu_kernel void @flat_system_monotonic_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in monotonic monotonic ret void } @@ -3581,7 +3581,7 @@ define amdgpu_kernel void @flat_system_acquire_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acquire monotonic ret void } @@ -3847,7 +3847,7 @@ define amdgpu_kernel void @flat_system_release_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in release monotonic ret void } @@ -4146,7 +4146,7 @@ define amdgpu_kernel void @flat_system_acq_rel_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acq_rel monotonic ret void } @@ -4445,7 +4445,7 @@ define amdgpu_kernel void @flat_system_seq_cst_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, 
i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in seq_cst monotonic ret void } @@ -4716,7 +4716,7 @@ define amdgpu_kernel void @flat_system_monotonic_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in monotonic acquire ret void } @@ -4987,7 +4987,7 @@ define amdgpu_kernel void @flat_system_acquire_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acquire acquire ret void } @@ -5286,7 +5286,7 @@ define amdgpu_kernel void @flat_system_release_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in release acquire ret void } @@ -5585,7 +5585,7 @@ define amdgpu_kernel void @flat_system_acq_rel_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acq_rel acquire ret void } @@ -5884,7 +5884,7 @@ define amdgpu_kernel void @flat_system_seq_cst_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in seq_cst acquire ret void } @@ -6183,7 +6183,7 @@ define amdgpu_kernel void @flat_system_monotonic_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in monotonic seq_cst ret void } @@ -6482,7 +6482,7 @@ define amdgpu_kernel void @flat_system_acquire_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acquire seq_cst ret void } @@ -6781,7 +6781,7 @@ define amdgpu_kernel void @flat_system_release_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in release seq_cst ret void } @@ -7080,7 +7080,7 @@ define amdgpu_kernel void @flat_system_acq_rel_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acq_rel seq_cst ret void } @@ -7379,7 +7379,7 @@ define amdgpu_kernel void @flat_system_seq_cst_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in seq_cst seq_cst ret void } @@ -7661,7 +7661,7 @@ define amdgpu_kernel void 
@flat_system_monotonic_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in monotonic monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -7962,7 +7962,7 @@ define amdgpu_kernel void @flat_system_acquire_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acquire monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8274,7 +8274,7 @@ define amdgpu_kernel void @flat_system_release_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in release monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8607,7 +8607,7 @@ define amdgpu_kernel void @flat_system_acq_rel_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acq_rel monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8940,7 +8940,7 @@ define amdgpu_kernel void @flat_system_seq_cst_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in seq_cst monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9245,7 +9245,7 @@ define amdgpu_kernel void @flat_system_monotonic_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in monotonic acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9546,7 +9546,7 @@ define amdgpu_kernel void @flat_system_acquire_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acquire acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9879,7 +9879,7 @@ define amdgpu_kernel void @flat_system_release_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in release acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -10212,7 +10212,7 @@ define amdgpu_kernel void @flat_system_acq_rel_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acq_rel acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -10545,7 
+10545,7 @@ define amdgpu_kernel void @flat_system_seq_cst_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in seq_cst acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -10878,7 +10878,7 @@ define amdgpu_kernel void @flat_system_monotonic_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in monotonic seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -11207,7 +11207,7 @@ define amdgpu_kernel void @flat_system_acquire_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acquire seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -11540,7 +11540,7 @@ define amdgpu_kernel void @flat_system_release_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in release seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -11873,7 +11873,7 @@ define amdgpu_kernel void @flat_system_acq_rel_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in acq_rel seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -12206,7 +12206,7 @@ define amdgpu_kernel void @flat_system_seq_cst_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in seq_cst seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -15549,7 +15549,7 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") monotonic monotonic ret void } @@ -15816,7 +15816,7 @@ define amdgpu_kernel void @flat_system_one_as_acquire_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acquire monotonic ret void } @@ -16082,7 +16082,7 @@ define amdgpu_kernel void @flat_system_one_as_release_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") release monotonic ret void } @@ -16377,7 +16377,7 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_monotonic_cmpxchg( ; 
GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acq_rel monotonic ret void } @@ -16672,7 +16672,7 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") seq_cst monotonic ret void } @@ -16939,7 +16939,7 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") monotonic acquire ret void } @@ -17206,7 +17206,7 @@ define amdgpu_kernel void @flat_system_one_as_acquire_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acquire acquire ret void } @@ -17501,7 +17501,7 @@ define amdgpu_kernel void @flat_system_one_as_release_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") release acquire ret void } @@ -17796,7 +17796,7 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acq_rel acquire ret void } @@ -18091,7 +18091,7 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") seq_cst acquire ret void } @@ -18386,7 +18386,7 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") monotonic seq_cst ret void } @@ -18681,7 +18681,7 @@ define amdgpu_kernel void @flat_system_one_as_acquire_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acquire seq_cst ret void } @@ -18976,7 +18976,7 @@ define amdgpu_kernel void @flat_system_one_as_release_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") release seq_cst ret void } @@ -19271,7 +19271,7 @@ define amdgpu_kernel void 
@flat_system_one_as_acq_rel_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acq_rel seq_cst ret void } @@ -19566,7 +19566,7 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") seq_cst seq_cst ret void } @@ -19848,7 +19848,7 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") monotonic monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -20159,7 +20159,7 @@ define amdgpu_kernel void @flat_system_one_as_acquire_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acquire monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -20471,7 +20471,7 @@ define amdgpu_kernel void @flat_system_one_as_release_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") release monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -20814,7 +20814,7 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acq_rel monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -21157,7 +21157,7 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") seq_cst monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -21472,7 +21472,7 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") monotonic acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -21783,7 +21783,7 @@ define amdgpu_kernel void @flat_system_one_as_acquire_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in 
syncscope("one-as") acquire acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -22126,7 +22126,7 @@ define amdgpu_kernel void @flat_system_one_as_release_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") release acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -22469,7 +22469,7 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acq_rel acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -22812,7 +22812,7 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") seq_cst acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -23155,7 +23155,7 @@ define amdgpu_kernel void @flat_system_one_as_monotonic_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") monotonic seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -23494,7 +23494,7 @@ define amdgpu_kernel void @flat_system_one_as_acquire_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acquire seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -23837,7 +23837,7 @@ define amdgpu_kernel void @flat_system_one_as_release_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") release seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -24180,7 +24180,7 @@ define amdgpu_kernel void @flat_system_one_as_acq_rel_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") acq_rel seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -24523,7 +24523,7 @@ define amdgpu_kernel void @flat_system_one_as_seq_cst_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("one-as") seq_cst seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 diff --git 
a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-wavefront.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-wavefront.ll index 7c637a20ab47b..8a976fb729fda 100644 --- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-wavefront.ll +++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-wavefront.ll @@ -2896,7 +2896,7 @@ define amdgpu_kernel void @flat_wavefront_monotonic_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") monotonic monotonic ret void } @@ -3134,7 +3134,7 @@ define amdgpu_kernel void @flat_wavefront_acquire_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acquire monotonic ret void } @@ -3372,7 +3372,7 @@ define amdgpu_kernel void @flat_wavefront_release_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") release monotonic ret void } @@ -3610,7 +3610,7 @@ define amdgpu_kernel void @flat_wavefront_acq_rel_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acq_rel monotonic ret void } @@ -3848,7 +3848,7 @@ define amdgpu_kernel void @flat_wavefront_seq_cst_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") seq_cst monotonic ret void } @@ -4086,7 +4086,7 @@ define amdgpu_kernel void @flat_wavefront_monotonic_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") monotonic acquire ret void } @@ -4324,7 +4324,7 @@ define amdgpu_kernel void @flat_wavefront_acquire_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acquire acquire ret void } @@ -4562,7 +4562,7 @@ define amdgpu_kernel void @flat_wavefront_release_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") release acquire ret void } @@ -4800,7 +4800,7 @@ define amdgpu_kernel void @flat_wavefront_acq_rel_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acq_rel acquire ret void } @@ -5038,7 +5038,7 @@ define amdgpu_kernel void 
@flat_wavefront_seq_cst_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") seq_cst acquire ret void } @@ -5276,7 +5276,7 @@ define amdgpu_kernel void @flat_wavefront_monotonic_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") monotonic seq_cst ret void } @@ -5514,7 +5514,7 @@ define amdgpu_kernel void @flat_wavefront_acquire_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acquire seq_cst ret void } @@ -5752,7 +5752,7 @@ define amdgpu_kernel void @flat_wavefront_release_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") release seq_cst ret void } @@ -5990,7 +5990,7 @@ define amdgpu_kernel void @flat_wavefront_acq_rel_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acq_rel seq_cst ret void } @@ -6228,7 +6228,7 @@ define amdgpu_kernel void @flat_wavefront_seq_cst_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") seq_cst seq_cst ret void } @@ -6510,7 +6510,7 @@ define amdgpu_kernel void @flat_wavefront_monotonic_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") monotonic monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -6794,7 +6794,7 @@ define amdgpu_kernel void @flat_wavefront_acquire_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acquire monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -7078,7 +7078,7 @@ define amdgpu_kernel void @flat_wavefront_release_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") release monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -7362,7 +7362,7 @@ define amdgpu_kernel void @flat_wavefront_acq_rel_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + 
%gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acq_rel monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -7646,7 +7646,7 @@ define amdgpu_kernel void @flat_wavefront_seq_cst_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") seq_cst monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -7930,7 +7930,7 @@ define amdgpu_kernel void @flat_wavefront_monotonic_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") monotonic acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8214,7 +8214,7 @@ define amdgpu_kernel void @flat_wavefront_acquire_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acquire acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8498,7 +8498,7 @@ define amdgpu_kernel void @flat_wavefront_release_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") release acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -8782,7 +8782,7 @@ define amdgpu_kernel void @flat_wavefront_acq_rel_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acq_rel acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9066,7 +9066,7 @@ define amdgpu_kernel void @flat_wavefront_seq_cst_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") seq_cst acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9350,7 +9350,7 @@ define amdgpu_kernel void @flat_wavefront_monotonic_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") monotonic seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -9634,7 +9634,7 @@ define amdgpu_kernel void @flat_wavefront_acquire_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acquire seq_cst %val0 = extractvalue { i32, i1 
} %val, 0 store i32 %val0, ptr %out, align 4 @@ -9918,7 +9918,7 @@ define amdgpu_kernel void @flat_wavefront_release_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") release seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -10202,7 +10202,7 @@ define amdgpu_kernel void @flat_wavefront_acq_rel_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") acq_rel seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -10486,7 +10486,7 @@ define amdgpu_kernel void @flat_wavefront_seq_cst_seq_cst_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront") seq_cst seq_cst %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -13377,7 +13377,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_monotonic_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") monotonic monotonic ret void } @@ -13615,7 +13615,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acquire_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acquire monotonic ret void } @@ -13853,7 +13853,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_release_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") release monotonic ret void } @@ -14091,7 +14091,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acq_rel_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acq_rel monotonic ret void } @@ -14329,7 +14329,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_seq_cst_monotonic_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") seq_cst monotonic ret void } @@ -14567,7 +14567,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_monotonic_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") monotonic acquire ret void } @@ 
-14805,7 +14805,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acquire_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acquire acquire ret void } @@ -15043,7 +15043,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_release_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") release acquire ret void } @@ -15281,7 +15281,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acq_rel_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acq_rel acquire ret void } @@ -15519,7 +15519,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_seq_cst_acquire_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") seq_cst acquire ret void } @@ -15757,7 +15757,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_monotonic_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") monotonic seq_cst ret void } @@ -15995,7 +15995,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acquire_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acquire seq_cst ret void } @@ -16233,7 +16233,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_release_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") release seq_cst ret void } @@ -16471,7 +16471,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acq_rel_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acq_rel seq_cst ret void } @@ -16709,7 +16709,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_seq_cst_seq_cst_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") seq_cst seq_cst ret void } @@ -16991,7 +16991,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_monotonic_monotonic_ret_cmpxchg ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr 
inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") monotonic monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -17275,7 +17275,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acquire_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acquire monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -17559,7 +17559,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acq_rel_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acq_rel monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -17843,7 +17843,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_seq_cst_monotonic_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") seq_cst monotonic %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -18127,7 +18127,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_monotonic_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") monotonic acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -18411,7 +18411,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acquire_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acquire acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -18695,7 +18695,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_release_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") release acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -18979,7 +18979,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acq_rel_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acq_rel acquire %val0 = extractvalue { i32, i1 } %val, 0 store i32 %val0, ptr %out, align 4 @@ -19263,7 +19263,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_seq_cst_acquire_ret_cmpxchg( ; GFX12-CU-NEXT: s_endpgm ptr %out, i32 %in, i32 %old) { entry: - %gep = getelementptr i32, ptr %out, i32 4 + %gep = getelementptr inbounds i32, ptr %out, i32 4 %val = 
cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") seq_cst acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -19547,7 +19547,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_monotonic_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") monotonic seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -19831,7 +19831,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acquire_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acquire seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -20115,7 +20115,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_release_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") release seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -20399,7 +20399,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_acq_rel_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") acq_rel seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -20683,7 +20683,7 @@ define amdgpu_kernel void @flat_wavefront_one_as_seq_cst_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("wavefront-one-as") seq_cst seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-workgroup.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-workgroup.ll
index 0fd4aa4a7a93f..a08783436dd15 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-workgroup.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-workgroup.ll
@@ -3155,7 +3155,7 @@ define amdgpu_kernel void @flat_workgroup_monotonic_monotonic_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") monotonic monotonic
   ret void
 }
@@ -3412,7 +3412,7 @@ define amdgpu_kernel void @flat_workgroup_acquire_monotonic_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") acquire monotonic
   ret void
 }
@@ -3667,7 +3667,7 @@ define amdgpu_kernel void @flat_workgroup_release_monotonic_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") release monotonic
   ret void
 }
@@ -3941,7 +3941,7 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_monotonic_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") acq_rel monotonic
   ret void
 }
@@ -4215,7 +4215,7 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_monotonic_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") seq_cst monotonic
   ret void
 }
@@ -4472,7 +4472,7 @@ define amdgpu_kernel void @flat_workgroup_monotonic_acquire_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") monotonic acquire
   ret void
 }
@@ -4729,7 +4729,7 @@ define amdgpu_kernel void @flat_workgroup_acquire_acquire_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") acquire acquire
   ret void
 }
@@ -5003,7 +5003,7 @@ define amdgpu_kernel void @flat_workgroup_release_acquire_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") release acquire
   ret void
 }
@@ -5277,7 +5277,7 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_acquire_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") acq_rel acquire
   ret void
 }
@@ -5551,7 +5551,7 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_acquire_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") seq_cst acquire
   ret void
 }
@@ -5825,7 +5825,7 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_seq_cst_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") seq_cst seq_cst
   ret void
 }
@@ -6107,7 +6107,7 @@ define amdgpu_kernel void @flat_workgroup_monotonic_monotonic_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") monotonic monotonic
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -6403,7 +6403,7 @@ define amdgpu_kernel void @flat_workgroup_acquire_monotonic_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") acquire monotonic
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -6704,7 +6704,7 @@ define amdgpu_kernel void @flat_workgroup_release_monotonic_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") release monotonic
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -7019,7 +7019,7 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_monotonic_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") acq_rel monotonic
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -7334,7 +7334,7 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_monotonic_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") seq_cst monotonic
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -7632,7 +7632,7 @@ define amdgpu_kernel void @flat_workgroup_monotonic_acquire_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") monotonic acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -7928,7 +7928,7 @@ define amdgpu_kernel void @flat_workgroup_acquire_acquire_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") acquire acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -8243,7 +8243,7 @@ define amdgpu_kernel void @flat_workgroup_release_acquire_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") release acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -8558,7 +8558,7 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_acquire_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") acq_rel acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -8873,7 +8873,7 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_acquire_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") seq_cst acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -9188,7 +9188,7 @@ define amdgpu_kernel void @flat_workgroup_monotonic_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") monotonic seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -9501,7 +9501,7 @@ define amdgpu_kernel void @flat_workgroup_acquire_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") acquire seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -9816,7 +9816,7 @@ define amdgpu_kernel void @flat_workgroup_release_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") release seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -10131,7 +10131,7 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") acq_rel seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -10446,7 +10446,7 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup") seq_cst seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -13493,7 +13493,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_monotonic_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") monotonic monotonic
   ret void
 }
@@ -13741,7 +13741,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_monotonic_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acquire monotonic
   ret void
 }
@@ -13989,7 +13989,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_monotonic_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") release monotonic
   ret void
 }
@@ -14247,7 +14247,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_monotonic_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acq_rel monotonic
   ret void
 }
@@ -14505,7 +14505,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_monotonic_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") seq_cst monotonic
   ret void
 }
@@ -14753,7 +14753,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_acquire_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") monotonic acquire
   ret void
 }
@@ -15001,7 +15001,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_acquire_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acquire acquire
   ret void
 }
@@ -15259,7 +15259,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_acquire_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") release acquire
   ret void
 }
@@ -15517,7 +15517,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_acquire_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acq_rel acquire
   ret void
 }
@@ -15775,7 +15775,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_acquire_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") seq_cst acquire
   ret void
 }
@@ -16033,7 +16033,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_seq_cst_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") monotonic seq_cst
   ret void
 }
@@ -16291,7 +16291,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_seq_cst_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acquire seq_cst
   ret void
 }
@@ -16549,7 +16549,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_seq_cst_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") release seq_cst
   ret void
 }
@@ -16807,7 +16807,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_seq_cst_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acq_rel seq_cst
   ret void
 }
@@ -17065,7 +17065,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_seq_cst_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") seq_cst seq_cst
   ret void
 }
@@ -17347,7 +17347,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_monotonic_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") monotonic monotonic
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -17639,7 +17639,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_monotonic_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acquire monotonic
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -17933,7 +17933,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_monotonic_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") release monotonic
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -18237,7 +18237,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_monotonic_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acq_rel monotonic
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -18541,7 +18541,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_monotonic_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") seq_cst monotonic
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -18835,7 +18835,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_acquire_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") monotonic acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -19127,7 +19127,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_acquire_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acquire acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -19431,7 +19431,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_acquire_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") release acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -19735,7 +19735,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_acquire_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acq_rel acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -20039,7 +20039,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_acquire_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") seq_cst acquire
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -20343,7 +20343,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") monotonic seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -20645,7 +20645,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acquire seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -20949,7 +20949,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") release seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -21253,7 +21253,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") acq_rel seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
@@ -21557,7 +21557,7 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_seq_cst_ret_cmpxchg(
 ; GFX12-CU-NEXT: s_endpgm
     ptr %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, ptr %out, i32 4
+  %gep = getelementptr inbounds i32, ptr %out, i32 4
   %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("workgroup-one-as") seq_cst seq_cst
   %val0 = extractvalue { i32, i1 } %val, 0
   store i32 %val0, ptr %out, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/offset-split-flat.ll b/llvm/test/CodeGen/AMDGPU/offset-split-flat.ll
index bf450ab6e80c4..1d155b522b771 100644
--- a/llvm/test/CodeGen/AMDGPU/offset-split-flat.ll
+++ b/llvm/test/CodeGen/AMDGPU/offset-split-flat.ll
@@ -45,7 +45,7 @@ define i8 @flat_inst_valu_offset_1(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:1
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 1
+  %gep = getelementptr inbounds i8, ptr %p, i64 1
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -84,7 +84,7 @@ define i8 @flat_inst_valu_offset_11bit_max(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:2047
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 2047
+  %gep = getelementptr inbounds i8, ptr %p, i64 2047
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -123,7 +123,7 @@ define i8 @flat_inst_valu_offset_12bit_max(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:4095
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 4095
+  %gep = getelementptr inbounds i8, ptr %p, i64 4095
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -186,7 +186,7 @@ define i8 @flat_inst_valu_offset_13bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 8191
+  %gep = getelementptr inbounds i8, ptr %p, i64 8191
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -249,7 +249,7 @@ define i8 @flat_inst_valu_offset_24bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 8388607
+  %gep = getelementptr inbounds i8, ptr %p, i64 8388607
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -293,7 +293,7 @@ define i8 @flat_inst_valu_offset_neg_11bit_max(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:-2048
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -2048
+  %gep = getelementptr inbounds i8, ptr %p, i64 -2048
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -337,7 +337,7 @@ define i8 @flat_inst_valu_offset_neg_12bit_max(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:-4096
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -4096
+  %gep = getelementptr inbounds i8, ptr %p, i64 -4096
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -381,7 +381,7 @@ define i8 @flat_inst_valu_offset_neg_13bit_max(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:-8192
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -8192
+  %gep = getelementptr inbounds i8, ptr %p, i64 -8192
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -425,7 +425,7 @@ define i8 @flat_inst_valu_offset_neg_24bit_max(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:-8388608
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -8388608
+  %gep = getelementptr inbounds i8, ptr %p, i64 -8388608
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -465,7 +465,7 @@ define i8 @flat_inst_valu_offset_2x_11bit_max(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:4095
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 4095
+  %gep = getelementptr inbounds i8, ptr %p, i64 4095
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -528,7 +528,7 @@ define i8 @flat_inst_valu_offset_2x_12bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 8191
+  %gep = getelementptr inbounds i8, ptr %p, i64 8191
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -591,7 +591,7 @@ define i8 @flat_inst_valu_offset_2x_13bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 16383
+  %gep = getelementptr inbounds i8, ptr %p, i64 16383
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -671,7 +671,7 @@ define i8 @flat_inst_valu_offset_2x_24bit_max(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 16777214
+  %gep = getelementptr inbounds i8, ptr %p, i64 16777214
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -715,7 +715,7 @@ define i8 @flat_inst_valu_offset_2x_neg_11bit_max(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:-4096
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -4096
+  %gep = getelementptr inbounds i8, ptr %p, i64 -4096
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -759,7 +759,7 @@ define i8 @flat_inst_valu_offset_2x_neg_12bit_max(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:-8192
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -8192
+  %gep = getelementptr inbounds i8, ptr %p, i64 -8192
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -803,7 +803,7 @@ define i8 @flat_inst_valu_offset_2x_neg_13bit_max(ptr %p) {
 ; GFX12-NEXT: flat_load_u8 v0, v[0:1] offset:-16384
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -16384
+  %gep = getelementptr inbounds i8, ptr %p, i64 -16384
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -864,7 +864,7 @@ define i8 @flat_inst_valu_offset_2x_neg_24bit_max(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -16777215
+  %gep = getelementptr inbounds i8, ptr %p, i64 -16777215
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -945,7 +945,7 @@ define i8 @flat_inst_valu_offset_64bit_11bit_split0(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 8589936639
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589936639
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1026,7 +1026,7 @@ define i8 @flat_inst_valu_offset_64bit_11bit_split1(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 8589936640
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589936640
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1107,7 +1107,7 @@ define i8 @flat_inst_valu_offset_64bit_12bit_split0(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 8589938687
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589938687
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1169,7 +1169,7 @@ define i8 @flat_inst_valu_offset_64bit_12bit_split1(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 8589938688
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589938688
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1250,7 +1250,7 @@ define i8 @flat_inst_valu_offset_64bit_13bit_split0(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 8589942783
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589942783
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1312,7 +1312,7 @@ define i8 @flat_inst_valu_offset_64bit_13bit_split1(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 8589942784
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589942784
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1385,7 +1385,7 @@ define i8 @flat_inst_valu_offset_64bit_11bit_neg_high_split0(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854773761
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854773761
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1458,7 +1458,7 @@ define i8 @flat_inst_valu_offset_64bit_11bit_neg_high_split1(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854773760
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854773760
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1531,7 +1531,7 @@ define i8 @flat_inst_valu_offset_64bit_12bit_neg_high_split0(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854771713
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854771713
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1604,7 +1604,7 @@ define i8 @flat_inst_valu_offset_64bit_12bit_neg_high_split1(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854771712
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854771712
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1677,7 +1677,7 @@ define i8 @flat_inst_valu_offset_64bit_13bit_neg_high_split0(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854767617
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854767617
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1750,7 +1750,7 @@ define i8 @flat_inst_valu_offset_64bit_13bit_neg_high_split1(ptr %p) {
 ; GFX12-GISEL-NEXT: flat_load_u8 v0, v[0:1]
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854767616
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854767616
   %load = load i8, ptr %gep, align 4
   ret i8 %load
 }
@@ -1799,7 +1799,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_1(ptr %p) {
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 1
+  %gep = getelementptr inbounds i8, ptr %p, i64 1
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -1849,7 +1849,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_11bit_max(ptr %p) {
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 2047
+  %gep = getelementptr inbounds i8, ptr %p, i64 2047
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -1899,7 +1899,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_12bit_max(ptr %p) {
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 4095
+  %gep = getelementptr inbounds i8, ptr %p, i64 4095
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -1979,7 +1979,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_13bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX11-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 8191
+  %gep = getelementptr inbounds i8, ptr %p, i64 8191
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2059,7 +2059,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_neg_11bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX11-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -2048
+  %gep = getelementptr inbounds i8, ptr %p, i64 -2048
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2139,7 +2139,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_neg_12bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX11-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -4096
+  %gep = getelementptr inbounds i8, ptr %p, i64 -4096
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2219,7 +2219,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_neg_13bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX11-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -8192
+  %gep = getelementptr inbounds i8, ptr %p, i64 -8192
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2269,7 +2269,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_2x_11bit_max(ptr %p) {
 ; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 4095
+  %gep = getelementptr inbounds i8, ptr %p, i64 4095
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2349,7 +2349,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_2x_12bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX11-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 8191
+  %gep = getelementptr inbounds i8, ptr %p, i64 8191
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2429,7 +2429,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_2x_13bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX11-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 16383
+  %gep = getelementptr inbounds i8, ptr %p, i64 16383
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2509,7 +2509,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_2x_neg_11bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX11-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -4096
+  %gep = getelementptr inbounds i8, ptr %p, i64 -4096
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2589,7 +2589,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_2x_neg_12bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX11-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -8192
+  %gep = getelementptr inbounds i8, ptr %p, i64 -8192
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2669,7 +2669,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_2x_neg_13bit_max(ptr %p) {
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX11-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -16384
+  %gep = getelementptr inbounds i8, ptr %p, i64 -16384
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2764,7 +2764,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_11bit_split0(ptr %p) {
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 8589936639
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589936639
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2859,7 +2859,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_11bit_split1(ptr %p) {
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 8589936640
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589936640
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -2954,7 +2954,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_12bit_split0(ptr %p) {
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 8589938687
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589938687
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -3050,7 +3050,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_12bit_split1(ptr %p) {
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 8589938688
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589938688
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -3146,7 +3146,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_13bit_split0(ptr %p) {
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 8589942783
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589942783
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -3242,7 +3242,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_13bit_split1(ptr %p) {
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 8589942784
+  %gep = getelementptr inbounds i8, ptr %p, i64 8589942784
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -3341,7 +3341,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_11bit_neg_high_split0(ptr
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854773761
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854773761
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -3440,7 +3440,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_11bit_neg_high_split1(ptr
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854773760
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854773760
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -3539,7 +3539,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_12bit_neg_high_split0(ptr
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854771713
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854771713
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -3638,7 +3638,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_12bit_neg_high_split1(ptr
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854771712
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854771712
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -3737,7 +3737,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_13bit_neg_high_split0(ptr
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854767617
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854767617
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
@@ -3836,7 +3836,7 @@ define amdgpu_kernel void @flat_inst_salu_offset_64bit_13bit_neg_high_split1(ptr
 ; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
 ; GFX12-GISEL-NEXT: flat_store_b8 v[0:1], v0
 ; GFX12-GISEL-NEXT: s_endpgm
-  %gep = getelementptr i8, ptr %p, i64 -9223372036854767616
+  %gep = getelementptr inbounds i8, ptr %p, i64 -9223372036854767616
   %load = load volatile i8, ptr %gep, align 1
   store i8 %load, ptr poison
   ret void
diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index 35b55a0addd95..1cb4c140c316d 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -2643,7 +2643,7 @@ define amdgpu_kernel void @negativeoffsetnullptr(ptr %buffer) {
 ; GFX11-NEXT: s_endpgm
 entry:
   %null = select i1 false, ptr %buffer, ptr addrspacecast (ptr addrspace(5) null to ptr)
-  %gep = getelementptr i8, ptr %null, i64 -1
+  %gep = getelementptr inbounds i8, ptr %null, i64 -1
   %ld = load i8, ptr %gep
   %cmp = icmp eq i8 %ld, 0
   br label %branch