
Commit ae25a39

AMDGPU/GlobalISel: Enable sret demotion
1 parent 57e0cd3 commit ae25a39
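
This enables sret demotion for non-entry functions in the AMDGPU GlobalISel path: when a return value cannot fit in the return registers, canLowerReturn reports so and the return is rewritten into stores through a hidden pointer argument. A minimal LLVM IR sketch of the effect, based on the v33i32_func_void test updated below (the demoted signature and the .demoted name are illustrative only; the actual demotion happens during call lowering on MIR, not as an IR rewrite):

; Original: a <33 x i32> return does not fit in the return registers.
define <33 x i32> @v33i32_func_void() {
  %ptr = load volatile <33 x i32> addrspace(1)*, <33 x i32> addrspace(1)* addrspace(4)* undef
  %val = load <33 x i32>, <33 x i32> addrspace(1)* %ptr
  ret <33 x i32> %val
}

; Conceptually after demotion: the caller passes a private (addrspace 5) sret
; pointer in the first VGPR and the callee stores the result through it.
; The name @v33i32_func_void.demoted is hypothetical, for illustration only.
define void @v33i32_func_void.demoted(<33 x i32> addrspace(5)* sret(<33 x i32>) %out) {
  %ptr = load volatile <33 x i32> addrspace(1)*, <33 x i32> addrspace(1)* addrspace(4)* undef
  %val = load <33 x i32>, <33 x i32> addrspace(1)* %ptr
  store <33 x i32> %val, <33 x i32> addrspace(5)* %out
  ret void
}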

6 files changed, +345 -27 lines changed


llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h

Lines changed: 2 additions & 2 deletions
@@ -358,8 +358,8 @@ class CallLowering {
   /// described by \p Outs can fit into the return registers. If false
   /// is returned, an sret-demotion is performed.
   virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
-                              SmallVectorImpl<BaseArgInfo> &Outs, bool IsVarArg,
-                              LLVMContext &Context) const {
+                              SmallVectorImpl<BaseArgInfo> &Outs,
+                              bool IsVarArg) const {
     return true;
   }

llvm/lib/CodeGen/GlobalISel/CallLowering.cpp

Lines changed: 2 additions & 4 deletions
@@ -95,8 +95,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,

   SmallVector<BaseArgInfo, 4> SplitArgs;
   getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
-  Info.CanLowerReturn =
-      canLowerReturn(MF, CallConv, SplitArgs, IsVarArg, RetTy->getContext());
+  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);

   if (!Info.CanLowerReturn) {
     // Callee requires sret demotion.
@@ -592,8 +591,7 @@ bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
   SmallVector<BaseArgInfo, 4> SplitArgs;
   getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
                 MF.getDataLayout());
-  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg(),
-                        ReturnType->getContext());
+  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
 }

 bool CallLowering::analyzeArgInfo(CCState &CCState,

llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp

Lines changed: 30 additions & 3 deletions
@@ -20,6 +20,7 @@
 #include "SIMachineFunctionInfo.h"
 #include "SIRegisterInfo.h"
 #include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"

@@ -420,6 +421,22 @@ static void unpackRegsToOrigType(MachineIRBuilder &B,
   B.buildUnmerge(UnmergeResults, UnmergeSrc);
 }

+bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF,
+                                        CallingConv::ID CallConv,
+                                        SmallVectorImpl<BaseArgInfo> &Outs,
+                                        bool IsVarArg) const {
+  // For shaders. Vector types should be explicitly handled by CC.
+  if (AMDGPU::isEntryFunctionCC(CallConv))
+    return true;
+
+  SmallVector<CCValAssign, 16> ArgLocs;
+  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
+  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
+                 MF.getFunction().getContext());
+
+  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
+}
+
 /// Lower the return value for the already existing \p Ret. This assumes that
 /// \p B's insertion point is correct.
 bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
@@ -533,7 +550,9 @@ bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
     Ret.addUse(ReturnAddrVReg);
   }

-  if (!lowerReturnVal(B, Val, VRegs, Ret))
+  if (!FLI.CanLowerReturn)
+    insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
+  else if (!lowerReturnVal(B, Val, VRegs, Ret))
     return false;

   if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
@@ -872,6 +891,11 @@ bool AMDGPUCallLowering::lowerFormalArguments(
   unsigned Idx = 0;
   unsigned PSInputNum = 0;

+  // Insert the hidden sret parameter if the return value won't fit in the
+  // return registers.
+  if (!FLI.CanLowerReturn)
+    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);
+
   for (auto &Arg : F.args()) {
     if (DL.getTypeStoreSize(Arg.getType()) == 0)
       continue;
@@ -1327,7 +1351,10 @@ bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN);

   SmallVector<ArgInfo, 8> InArgs;
-  if (!Info.OrigRet.Ty->isVoidTy()) {
+  if (!Info.CanLowerReturn) {
+    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
+                    Info.DemoteRegister, Info.DemoteStackIndex);
+  } else if (!Info.OrigRet.Ty->isVoidTy()) {
     SmallVector<ArgInfo, 8> PreSplitRetInfos;

     splitToValueTypes(
@@ -1350,7 +1377,7 @@ bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   // Finally we can copy the returned value back into its virtual-register. In
   // symmetry with the arguments, the physical register must be an
   // implicit-define of the call instruction.
-  if (!Info.OrigRet.Ty->isVoidTy()) {
+  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
     CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,
                                                       Info.IsVarArg);
     CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
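
On the caller side, lowerCall now skips the normal return-value copy when CanLowerReturn is false and instead reads the result back from the demoted stack slot with insertSRetLoads. A conceptual IR-level sketch of that call-site rewrite, under the same assumptions as above (the @caller wrappers, the .demoted names, and the explicit alloca are hypothetical; GlobalISel does the equivalent on MIR through DemoteRegister and DemoteStackIndex):

target datalayout = "A5"   ; allocas live in the private address space (5)

declare <33 x i32> @v33i32_func_void()
declare void @v33i32_func_void.demoted(<33 x i32> addrspace(5)* sret(<33 x i32>))

; Original call site: the callee's result is too large for the return registers.
define void @caller(<33 x i32> addrspace(1)* %out) {
  %val = call <33 x i32> @v33i32_func_void()
  store <33 x i32> %val, <33 x i32> addrspace(1)* %out
  ret void
}

; Conceptually after demotion: the caller reserves a private stack slot, passes
; its address as the hidden first argument, and loads the value back afterwards.
define void @caller.demoted(<33 x i32> addrspace(1)* %out) {
  %slot = alloca <33 x i32>, align 256, addrspace(5)
  call void @v33i32_func_void.demoted(<33 x i32> addrspace(5)* sret(<33 x i32>) %slot)
  %val = load <33 x i32>, <33 x i32> addrspace(5)* %slot
  store <33 x i32> %val, <33 x i32> addrspace(1)* %out
  ret void
}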

llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h

Lines changed: 4 additions & 0 deletions
@@ -42,6 +42,10 @@ class AMDGPUCallLowering final : public CallLowering {
                      bool IsOutgoing,
                      SplitArgTy PerformArgSplit) const;

+  bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
+                      SmallVectorImpl<BaseArgInfo> &Outs,
+                      bool IsVarArg) const override;
+
   bool lowerReturnVal(MachineIRBuilder &B, const Value *Val,
                       ArrayRef<Register> VRegs, MachineInstrBuilder &Ret) const;

llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll

Lines changed: 51 additions & 18 deletions
@@ -1172,53 +1172,86 @@ define void @void_func_sret_struct_i8_i32({ i8, i32 } addrspace(5)* sret({ i8, i

 define <33 x i32> @v33i32_func_void() #0 {
   ; CHECK-LABEL: name: v33i32_func_void
-  ; CHECK: bb.0:
-  ; CHECK: successors: %bb.1(0x80000000)
-  ; CHECK: liveins: $sgpr30_sgpr31
-  ; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
+  ; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<33 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK: [[LOAD1:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[LOAD]](p1) :: (load 132 from %ir.ptr, align 256, addrspace 1)
-  ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32), [[UV32:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<33 x s32>)
+  ; CHECK: G_STORE [[LOAD1]](<33 x s32>), [[COPY]](p5) :: (store 132, align 256, addrspace 5)
+  ; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
+  ; CHECK: S_SETPC_B64_return [[COPY2]]
   %ptr = load volatile <33 x i32> addrspace(1)*, <33 x i32> addrspace(1)* addrspace(4)* undef
   %val = load <33 x i32>, <33 x i32> addrspace(1)* %ptr
   ret <33 x i32> %val
 }

+define <33 x i32> @v33i32_func_v33i32_i32(<33 x i32> addrspace(1)* %p, i32 %idx) #0 {
+  ; CHECK-LABEL: name: v33i32_func_v33i32_i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
+  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; CHECK: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
+  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY3]](s32)
+  ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
+  ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
+  ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV]], [[MUL]](s64)
+  ; CHECK: [[COPY5:%[0-9]+]]:_(p1) = COPY [[PTR_ADD]](p1)
+  ; CHECK: [[LOAD:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[COPY5]](p1) :: (load 132 from %ir.gep, align 256, addrspace 1)
+  ; CHECK: G_STORE [[LOAD]](<33 x s32>), [[COPY]](p5) :: (store 132, align 256, addrspace 5)
+  ; CHECK: [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
+  ; CHECK: S_SETPC_B64_return [[COPY6]]
+  %gep = getelementptr inbounds <33 x i32>, <33 x i32> addrspace(1)* %p, i32 %idx
+  %val = load <33 x i32>, <33 x i32> addrspace(1)* %gep
+  ret <33 x i32> %val
+}
+
 define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
   ; CHECK-LABEL: name: struct_v32i32_i32_func_void
-  ; CHECK: bb.0:
-  ; CHECK: successors: %bb.1(0x80000000)
-  ; CHECK: liveins: $sgpr30_sgpr31
-  ; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
+  ; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `{ <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK: [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr, addrspace 1)
   ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
   ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
   ; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from %ir.ptr + 128, align 128, addrspace 1)
-  ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
+  ; CHECK: G_STORE [[LOAD1]](<32 x s32>), [[COPY]](p5) :: (store 128, addrspace 5)
+  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+  ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+  ; CHECK: G_STORE [[LOAD2]](s32), [[PTR_ADD1]](p5) :: (store 4, align 128, addrspace 5)
+  ; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
+  ; CHECK: S_SETPC_B64_return [[COPY2]]
   %ptr = load volatile { <32 x i32>, i32 } addrspace(1)*, { <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef
   %val = load { <32 x i32>, i32 }, { <32 x i32>, i32 } addrspace(1)* %ptr
   ret { <32 x i32>, i32 }%val
 }

 define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
   ; CHECK-LABEL: name: struct_i32_v32i32_func_void
-  ; CHECK: bb.0:
-  ; CHECK: successors: %bb.1(0x80000000)
-  ; CHECK: liveins: $sgpr30_sgpr31
-  ; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
+  ; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `{ i32, <32 x i32> } addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p1) :: (load 4 from %ir.ptr, align 128, addrspace 1)
   ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
   ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
   ; CHECK: [[LOAD2:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 128 from %ir.ptr + 128, addrspace 1)
-  ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<32 x s32>)
+  ; CHECK: G_STORE [[LOAD1]](s32), [[COPY]](p5) :: (store 4, align 128, addrspace 5)
+  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+  ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+  ; CHECK: G_STORE [[LOAD2]](<32 x s32>), [[PTR_ADD1]](p5) :: (store 128, addrspace 5)
+  ; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
+  ; CHECK: S_SETPC_B64_return [[COPY2]]
   %ptr = load volatile { i32, <32 x i32> } addrspace(1)*, { i32, <32 x i32> } addrspace(1)* addrspace(4)* undef
   %val = load { i32, <32 x i32> }, { i32, <32 x i32> } addrspace(1)* %ptr
   ret { i32, <32 x i32> }%val