diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 23671c9ffcf19..443f5f71b1084 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7281,9 +7281,16 @@ SDValue AArch64TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
   Entry.Ty = IntPtrTy;
   Entry.Node = Trmp;
   Args.push_back(Entry);
-  Entry.Node = DAG.getConstant(20, dl, MVT::i64);
-  Args.push_back(Entry);
+  if (auto *FI = dyn_cast<FrameIndexSDNode>(Trmp.getNode())) {
+    MachineFunction &MF = DAG.getMachineFunction();
+    MachineFrameInfo &MFI = MF.getFrameInfo();
+    Entry.Node =
+        DAG.getConstant(MFI.getObjectSize(FI->getIndex()), dl, MVT::i64);
+  } else
+    Entry.Node = DAG.getConstant(36, dl, MVT::i64);
+
+  Args.push_back(Entry);
 
   Entry.Node = FPtr;
   Args.push_back(Entry);
   Entry.Node = Nest;
diff --git a/llvm/test/CodeGen/AArch64/trampoline.ll b/llvm/test/CodeGen/AArch64/trampoline.ll
index 293e538a7459d..30ac2aa283b3e 100644
--- a/llvm/test/CodeGen/AArch64/trampoline.ll
+++ b/llvm/test/CodeGen/AArch64/trampoline.ll
@@ -1,5 +1,7 @@
 ; RUN: llc -mtriple=aarch64-- < %s | FileCheck %s
 
+@trampg = internal global [36 x i8] zeroinitializer, align 8
+
 declare void @llvm.init.trampoline(ptr, ptr, ptr);
 declare ptr @llvm.adjust.trampoline(ptr);
 
@@ -8,12 +10,23 @@ define i64 @f(ptr nest %c, i64 %x, i64 %y) {
   ret i64 %sum
 }
 
-define i64 @main() {
+define i64 @func1() {
   %val = alloca i64
   %nval = bitcast ptr %val to ptr
   %tramp = alloca [36 x i8], align 8
+  ; CHECK: mov w1, #36
   ; CHECK: bl __trampoline_setup
   call void @llvm.init.trampoline(ptr %tramp, ptr @f, ptr %nval)
   %fp = call ptr @llvm.adjust.trampoline(ptr %tramp)
   ret i64 0
 }
+
+define i64 @func2() {
+  %val = alloca i64
+  %nval = bitcast ptr %val to ptr
+  ; CHECK: mov w1, #36
+  ; CHECK: bl __trampoline_setup
+  call void @llvm.init.trampoline(ptr @trampg, ptr @f, ptr %nval)
+  %fp = call ptr @llvm.adjust.trampoline(ptr @trampg)
+  ret i64 0
+}