diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp index c4b43e1b51265..c52487ab8a79a 100644 --- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp +++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp @@ -176,6 +176,9 @@ class AArch64AsmPrinter : public AsmPrinter { std::optional PACKey, uint64_t PACDisc, Register PACAddrDisc); + // Emit the sequence for PAC. + void emitPtrauthSign(const MachineInstr *MI); + // Emit the sequence to compute the discriminator. // // The returned register is either unmodified AddrDisc or ScratchReg. @@ -2175,6 +2178,37 @@ void AArch64AsmPrinter::emitPtrauthAuthResign( OutStreamer->emitLabel(EndSym); } +void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) { + Register Val = MI->getOperand(1).getReg(); + auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm(); + uint64_t Disc = MI->getOperand(3).getImm(); + Register AddrDisc = MI->getOperand(4).getReg(); + bool AddrDiscKilled = MI->getOperand(4).isKill(); + + // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch + // register is available. + Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16; + assert(ScratchReg != AddrDisc && + "Neither X16 nor X17 is available as a scratch register"); + + // Compute pac discriminator + assert(isUInt<16>(Disc)); + Register DiscReg = emitPtrauthDiscriminator( + Disc, AddrDisc, ScratchReg, /*MayUseAddrAsScratch=*/AddrDiscKilled); + bool IsZeroDisc = DiscReg == AArch64::XZR; + unsigned Opc = getPACOpcodeForKey(Key, IsZeroDisc); + + // paciza x16 ; if IsZeroDisc + // pacia x16, x17 ; if !IsZeroDisc + MCInst PACInst; + PACInst.setOpcode(Opc); + PACInst.addOperand(MCOperand::createReg(Val)); + PACInst.addOperand(MCOperand::createReg(Val)); + if (!IsZeroDisc) + PACInst.addOperand(MCOperand::createReg(DiscReg)); + EmitToStreamer(*OutStreamer, PACInst); +} + void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) { bool IsCall = MI->getOpcode() == AArch64::BLRA; unsigned BrTarget = MI->getOperand(0).getReg(); @@ -2890,6 +2924,10 @@ void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) { MI->getOperand(4).getImm(), MI->getOperand(5).getReg()); return; + case AArch64::PAC: + emitPtrauthSign(MI); + return; + case AArch64::LOADauthptrstatic: LowerLOADauthptrstatic(*MI); return; diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index f026726c3f484..9407157b66ed1 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -3098,6 +3098,83 @@ AArch64TargetLowering::EmitGetSMESaveSize(MachineInstr &MI, return BB; } +// Helper function to find the instruction that defined a virtual register. +// If unable to find such instruction, returns nullptr. +static const MachineInstr *stripVRegCopies(const MachineRegisterInfo &MRI, + Register Reg) { + while (Reg.isVirtual()) { + MachineInstr *DefMI = MRI.getVRegDef(Reg); + assert(DefMI && "Virtual register definition not found"); + unsigned Opcode = DefMI->getOpcode(); + + if (Opcode == AArch64::COPY) { + Reg = DefMI->getOperand(1).getReg(); + // Vreg is defined by copying from physreg. 
+ if (Reg.isPhysical()) + return DefMI; + continue; + } + if (Opcode == AArch64::SUBREG_TO_REG) { + Reg = DefMI->getOperand(2).getReg(); + continue; + } + + return DefMI; + } + return nullptr; +} + +void AArch64TargetLowering::fixupPtrauthDiscriminator( + MachineInstr &MI, MachineBasicBlock *BB, MachineOperand &IntDiscOp, + MachineOperand &AddrDiscOp, const TargetRegisterClass *AddrDiscRC) const { + const TargetInstrInfo *TII = Subtarget->getInstrInfo(); + MachineRegisterInfo &MRI = MI.getMF()->getRegInfo(); + const DebugLoc &DL = MI.getDebugLoc(); + + Register AddrDisc = AddrDiscOp.getReg(); + int64_t IntDisc = IntDiscOp.getImm(); + assert(IntDisc == 0 && "Blend components are already expanded"); + + const MachineInstr *DiscMI = stripVRegCopies(MRI, AddrDisc); + if (DiscMI) { + switch (DiscMI->getOpcode()) { + case AArch64::MOVKXi: + // blend(addr, imm) which is lowered as "MOVK addr, #imm, #48". + // #imm should be an immediate and not a global symbol, for example. + if (DiscMI->getOperand(2).isImm() && + DiscMI->getOperand(3).getImm() == 48) { + AddrDisc = DiscMI->getOperand(1).getReg(); + IntDisc = DiscMI->getOperand(2).getImm(); + } + break; + case AArch64::MOVi32imm: + case AArch64::MOVi64imm: + // Small immediate integer constant passed via VReg. + if (DiscMI->getOperand(1).isImm() && + isUInt<16>(DiscMI->getOperand(1).getImm())) { + AddrDisc = AArch64::NoRegister; + IntDisc = DiscMI->getOperand(1).getImm(); + } + break; + } + } + + // For uniformity, always use NoRegister, as XZR is not necessarily contained + // in the requested register class. + if (AddrDisc == AArch64::XZR) + AddrDisc = AArch64::NoRegister; + + // Make sure AddrDisc operand respects the register class imposed by MI. + if (AddrDisc && MRI.getRegClass(AddrDisc) != AddrDiscRC) { + Register TmpReg = MRI.createVirtualRegister(AddrDiscRC); + BuildMI(*BB, MI, DL, TII->get(AArch64::COPY), TmpReg).addReg(AddrDisc); + AddrDisc = TmpReg; + } + + AddrDiscOp.setReg(AddrDisc); + IntDiscOp.setImm(IntDisc); +} + MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter( MachineInstr &MI, MachineBasicBlock *BB) const { @@ -3196,6 +3273,11 @@ MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter( return EmitZTInstr(MI, BB, AArch64::ZERO_T, /*Op0IsDef=*/true); case AArch64::MOVT_TIZ_PSEUDO: return EmitZTInstr(MI, BB, AArch64::MOVT_TIZ, /*Op0IsDef=*/true); + + case AArch64::PAC: + fixupPtrauthDiscriminator(MI, BB, MI.getOperand(3), MI.getOperand(4), + &AArch64::GPR64noipRegClass); + return BB; } } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index 713793ec77da3..4d3dee98c1f44 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -182,6 +182,13 @@ class AArch64TargetLowering : public TargetLowering { MachineBasicBlock *EmitGetSMESaveSize(MachineInstr &MI, MachineBasicBlock *BB) const; + /// Replace (0, vreg) discriminator components with the operands of blend + /// or with (immediate, NoRegister) when possible. 
+ void fixupPtrauthDiscriminator(MachineInstr &MI, MachineBasicBlock *BB, + MachineOperand &IntDiscOp, + MachineOperand &AddrDiscOp, + const TargetRegisterClass *AddrDiscRC) const; + MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override; diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 9f8a2571b076e..4bdff0923a53b 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -2032,7 +2032,7 @@ let Predicates = [HasPAuth] in { def DZB : SignAuthZero; } - defm PAC : SignAuth<0b000, 0b010, "pac", int_ptrauth_sign>; + defm PAC : SignAuth<0b000, 0b010, "pac", null_frag>; defm AUT : SignAuth<0b001, 0b011, "aut", null_frag>; def XPACI : ClearAuth<0, "xpaci">; @@ -2152,6 +2152,26 @@ let Predicates = [HasPAuth] in { let Uses = []; } + // PAC pseudo instruction. In AsmPrinter, it is expanded into an actual PAC* + // instruction immediately preceded by the discriminator computation. + // This enforces the expected immediate modifier is used for signing, even + // if an attacker is able to substitute AddrDisc. + def PAC : Pseudo<(outs GPR64:$SignedVal), + (ins GPR64:$Val, i32imm:$Key, i64imm:$Disc, GPR64noip:$AddrDisc), + [], "$SignedVal = $Val">, Sched<[WriteI, ReadI]> { + let isCodeGenOnly = 1; + let hasSideEffects = 0; + let mayStore = 0; + let mayLoad = 0; + let Size = 12; + let Defs = [X16, X17]; + let usesCustomInserter = 1; + } + + // A standalone pattern is used, so that literal 0 can be passed as $Disc. + def : Pat<(int_ptrauth_sign GPR64:$Val, timm:$Key, GPR64noip:$AddrDisc), + (PAC GPR64:$Val, $Key, 0, GPR64noip:$AddrDisc)>; + // AUT and re-PAC a value, using different keys/data. // This directly manipulates x16/x17, which are the only registers that // certain OSs guarantee are safe to use for sensitive operations. diff --git a/llvm/test/CodeGen/AArch64/ptrauth-isel.ll b/llvm/test/CodeGen/AArch64/ptrauth-isel.ll new file mode 100644 index 0000000000000..7011b946aad74 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/ptrauth-isel.ll @@ -0,0 +1,269 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple arm64e-apple-darwin -verify-machineinstrs -stop-after=finalize-isel -global-isel=0 \ +; RUN: | FileCheck %s --check-prefixes=DAGISEL +; RUN: llc < %s -mtriple arm64e-apple-darwin -verify-machineinstrs -stop-after=finalize-isel -global-isel=1 -global-isel-abort=1 \ +; RUN: | FileCheck %s --check-prefixes=GISEL +; RUN: llc < %s -mtriple aarch64-linux-gnu -mattr=+pauth -verify-machineinstrs -stop-after=finalize-isel -global-isel=0 \ +; RUN: | FileCheck %s --check-prefixes=DAGISEL +; RUN: llc < %s -mtriple aarch64-linux-gnu -mattr=+pauth -verify-machineinstrs -stop-after=finalize-isel -global-isel=1 -global-isel-abort=1 \ +; RUN: | FileCheck %s --check-prefixes=GISEL + +; Check MIR produced by the instruction selector to validate properties that +; cannot be reliably tested by only inspecting the final asm output. + +@discvar = dso_local global i64 0 + +; Make sure the components of blend(addr, imm) and integer constants are +; recognized and passed to PAC pseudo via separate operands to prevent +; substitution of the immediate modifier. 
+; +; MIR output of the instruction selector is inspected, as it is hard to reliably +; distinguish MOVKXi immediately followed by a pseudo from a standalone pseudo +; instruction carrying address and immediate modifiers in its separate operands +; by only observing the final asm output. + +define i64 @small_imm_disc_optimized(i64 %addr) { + ; DAGISEL-LABEL: name: small_imm_disc_optimized + ; DAGISEL: bb.0.entry: + ; DAGISEL-NEXT: liveins: $x0 + ; DAGISEL-NEXT: {{ $}} + ; DAGISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; DAGISEL-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 42 + ; DAGISEL-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64noip = SUBREG_TO_REG 0, killed [[MOVi32imm]], %subreg.sub_32 + ; DAGISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 42, killed $noreg, implicit-def dead $x16, implicit-def dead $x17 + ; DAGISEL-NEXT: $x0 = COPY [[PAC]] + ; DAGISEL-NEXT: RET_ReallyLR implicit $x0 + ; + ; GISEL-LABEL: name: small_imm_disc_optimized + ; GISEL: bb.1.entry: + ; GISEL-NEXT: liveins: $x0 + ; GISEL-NEXT: {{ $}} + ; GISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; GISEL-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 42 + ; GISEL-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64noip = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; GISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 42, $noreg, implicit-def dead $x16, implicit-def dead $x17 + ; GISEL-NEXT: $x0 = COPY [[PAC]] + ; GISEL-NEXT: RET_ReallyLR implicit $x0 +entry: + %signed = call i64 @llvm.ptrauth.sign(i64 %addr, i32 2, i64 42) + ret i64 %signed +} + +; Without optimization, MOVi64imm may be used for small i64 constants as well. +define i64 @small_imm_disc_non_optimized(i64 %addr) noinline optnone { + ; DAGISEL-LABEL: name: small_imm_disc_non_optimized + ; DAGISEL: bb.0.entry: + ; DAGISEL-NEXT: liveins: $x0 + ; DAGISEL-NEXT: {{ $}} + ; DAGISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; DAGISEL-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY killed [[COPY]] + ; DAGISEL-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 42 + ; DAGISEL-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64noip = SUBREG_TO_REG 0, killed [[MOVi32imm]], %subreg.sub_32 + ; DAGISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY1]], 2, 42, killed $noreg, implicit-def dead $x16, implicit-def dead $x17 + ; DAGISEL-NEXT: [[COPY2:%[0-9]+]]:gpr64all = COPY [[PAC]] + ; DAGISEL-NEXT: $x0 = COPY [[COPY2]] + ; DAGISEL-NEXT: RET_ReallyLR implicit $x0 + ; + ; GISEL-LABEL: name: small_imm_disc_non_optimized + ; GISEL: bb.1.entry: + ; GISEL-NEXT: liveins: $x0 + ; GISEL-NEXT: {{ $}} + ; GISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; GISEL-NEXT: [[MOVi64imm:%[0-9]+]]:gpr64noip = MOVi64imm 42 + ; GISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 42, $noreg, implicit-def dead $x16, implicit-def dead $x17 + ; GISEL-NEXT: $x0 = COPY [[PAC]] + ; GISEL-NEXT: RET_ReallyLR implicit $x0 +entry: + %signed = call i64 @llvm.ptrauth.sign(i64 %addr, i32 2, i64 42) + ret i64 %signed +} + +define i64 @large_imm_disc_wreg(i64 %addr) { + ; DAGISEL-LABEL: name: large_imm_disc_wreg + ; DAGISEL: bb.0.entry: + ; DAGISEL-NEXT: liveins: $x0 + ; DAGISEL-NEXT: {{ $}} + ; DAGISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; DAGISEL-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 12345678 + ; DAGISEL-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64noip = SUBREG_TO_REG 0, killed [[MOVi32imm]], %subreg.sub_32 + ; DAGISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 0, killed [[SUBREG_TO_REG]], implicit-def dead $x16, implicit-def dead $x17 + ; DAGISEL-NEXT: $x0 = COPY [[PAC]] + ; DAGISEL-NEXT: RET_ReallyLR implicit $x0 + ; + ; GISEL-LABEL: name: 
large_imm_disc_wreg + ; GISEL: bb.1.entry: + ; GISEL-NEXT: liveins: $x0 + ; GISEL-NEXT: {{ $}} + ; GISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; GISEL-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 12345678 + ; GISEL-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64noip = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; GISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 0, [[SUBREG_TO_REG]], implicit-def dead $x16, implicit-def dead $x17 + ; GISEL-NEXT: $x0 = COPY [[PAC]] + ; GISEL-NEXT: RET_ReallyLR implicit $x0 +entry: + %signed = call i64 @llvm.ptrauth.sign(i64 %addr, i32 2, i64 12345678) + ret i64 %signed +} + +define i64 @large_imm_disc_xreg(i64 %addr) { + ; DAGISEL-LABEL: name: large_imm_disc_xreg + ; DAGISEL: bb.0.entry: + ; DAGISEL-NEXT: liveins: $x0 + ; DAGISEL-NEXT: {{ $}} + ; DAGISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; DAGISEL-NEXT: [[MOVi64imm:%[0-9]+]]:gpr64noip = MOVi64imm 123456789012345 + ; DAGISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 0, killed [[MOVi64imm]], implicit-def dead $x16, implicit-def dead $x17 + ; DAGISEL-NEXT: $x0 = COPY [[PAC]] + ; DAGISEL-NEXT: RET_ReallyLR implicit $x0 + ; + ; GISEL-LABEL: name: large_imm_disc_xreg + ; GISEL: bb.1.entry: + ; GISEL-NEXT: liveins: $x0 + ; GISEL-NEXT: {{ $}} + ; GISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; GISEL-NEXT: [[MOVi64imm:%[0-9]+]]:gpr64noip = MOVi64imm 123456789012345 + ; GISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 0, [[MOVi64imm]], implicit-def dead $x16, implicit-def dead $x17 + ; GISEL-NEXT: $x0 = COPY [[PAC]] + ; GISEL-NEXT: RET_ReallyLR implicit $x0 +entry: + %signed = call i64 @llvm.ptrauth.sign(i64 %addr, i32 2, i64 123456789012345) + ret i64 %signed +} + +; Make sure blend() is lowered as expected when optimization is disabled. +define i64 @blended_disc_non_optimized(i64 %addr, i64 %addrdisc) noinline optnone { + ; DAGISEL-LABEL: name: blended_disc_non_optimized + ; DAGISEL: bb.0.entry: + ; DAGISEL-NEXT: liveins: $x0, $x1 + ; DAGISEL-NEXT: {{ $}} + ; DAGISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x1 + ; DAGISEL-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x0 + ; DAGISEL-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY killed [[COPY1]] + ; DAGISEL-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY killed [[COPY]] + ; DAGISEL-NEXT: [[MOVKXi:%[0-9]+]]:gpr64 = MOVKXi [[COPY3]], 42, 48 + ; DAGISEL-NEXT: [[COPY4:%[0-9]+]]:gpr64noip = COPY [[MOVKXi]] + ; DAGISEL-NEXT: [[COPY5:%[0-9]+]]:gpr64noip = COPY [[COPY3]] + ; DAGISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY2]], 2, 42, [[COPY5]], implicit-def dead $x16, implicit-def dead $x17 + ; DAGISEL-NEXT: [[COPY6:%[0-9]+]]:gpr64all = COPY [[PAC]] + ; DAGISEL-NEXT: $x0 = COPY [[COPY6]] + ; DAGISEL-NEXT: RET_ReallyLR implicit $x0 + ; + ; GISEL-LABEL: name: blended_disc_non_optimized + ; GISEL: bb.1.entry: + ; GISEL-NEXT: liveins: $x0, $x1 + ; GISEL-NEXT: {{ $}} + ; GISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; GISEL-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 + ; GISEL-NEXT: [[MOVKXi:%[0-9]+]]:gpr64noip = MOVKXi [[COPY1]], 42, 48 + ; GISEL-NEXT: [[COPY2:%[0-9]+]]:gpr64noip = COPY [[COPY1]] + ; GISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 42, [[COPY2]], implicit-def dead $x16, implicit-def dead $x17 + ; GISEL-NEXT: $x0 = COPY [[PAC]] + ; GISEL-NEXT: RET_ReallyLR implicit $x0 +entry: + %disc = call i64 @llvm.ptrauth.blend(i64 %addrdisc, i64 42) + %signed = call i64 @llvm.ptrauth.sign(i64 %addr, i32 2, i64 %disc) + ret i64 %signed +} + +define i64 @blend_and_sign_same_bb(i64 %addr) { + ; DAGISEL-LABEL: name: blend_and_sign_same_bb + ; DAGISEL: bb.0.entry: + ; DAGISEL-NEXT: 
liveins: $x0 + ; DAGISEL-NEXT: {{ $}} + ; DAGISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; DAGISEL-NEXT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) @discvar + ; DAGISEL-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui killed [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) @discvar :: (dereferenceable load (s64) from @discvar) + ; DAGISEL-NEXT: [[MOVKXi:%[0-9]+]]:gpr64noip = MOVKXi [[LDRXui]], 42, 48 + ; DAGISEL-NEXT: [[COPY1:%[0-9]+]]:gpr64noip = COPY [[LDRXui]] + ; DAGISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 42, killed [[COPY1]], implicit-def dead $x16, implicit-def dead $x17 + ; DAGISEL-NEXT: $x0 = COPY [[PAC]] + ; DAGISEL-NEXT: RET_ReallyLR implicit $x0 + ; + ; GISEL-LABEL: name: blend_and_sign_same_bb + ; GISEL: bb.1.entry: + ; GISEL-NEXT: liveins: $x0 + ; GISEL-NEXT: {{ $}} + ; GISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; GISEL-NEXT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) @discvar + ; GISEL-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) @discvar :: (dereferenceable load (s64) from @discvar) + ; GISEL-NEXT: [[MOVKXi:%[0-9]+]]:gpr64noip = MOVKXi [[LDRXui]], 42, 48 + ; GISEL-NEXT: [[COPY1:%[0-9]+]]:gpr64noip = COPY [[LDRXui]] + ; GISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 42, [[COPY1]], implicit-def dead $x16, implicit-def dead $x17 + ; GISEL-NEXT: $x0 = COPY [[PAC]] + ; GISEL-NEXT: RET_ReallyLR implicit $x0 +entry: + %addrdisc = load i64, ptr @discvar + %disc = call i64 @llvm.ptrauth.blend(i64 %addrdisc, i64 42) + %signed = call i64 @llvm.ptrauth.sign(i64 %addr, i32 2, i64 %disc) + ret i64 %signed +} + +; In the below test cases both %addrdisc and %disc are computed (i.e. they are +; neither global addresses, nor function arguments) in a different basic block, +; making them harder to express via ISD::PtrAuthGlobalAddress. 
+ +define i64 @blend_and_sign_different_bbs(i64 %addr, i64 %cond) { + ; DAGISEL-LABEL: name: blend_and_sign_different_bbs + ; DAGISEL: bb.0.entry: + ; DAGISEL-NEXT: successors: %bb.1(0x50000000), %bb.2(0x30000000) + ; DAGISEL-NEXT: liveins: $x0, $x1 + ; DAGISEL-NEXT: {{ $}} + ; DAGISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x1 + ; DAGISEL-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x0 + ; DAGISEL-NEXT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) @discvar + ; DAGISEL-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui killed [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) @discvar :: (dereferenceable load (s64) from @discvar) + ; DAGISEL-NEXT: [[MOVKXi:%[0-9]+]]:gpr64 = MOVKXi [[LDRXui]], 42, 48 + ; DAGISEL-NEXT: [[COPY2:%[0-9]+]]:gpr64noip = COPY [[MOVKXi]] + ; DAGISEL-NEXT: CBZX [[COPY]], %bb.2 + ; DAGISEL-NEXT: B %bb.1 + ; DAGISEL-NEXT: {{ $}} + ; DAGISEL-NEXT: bb.1.next: + ; DAGISEL-NEXT: successors: %bb.2(0x80000000) + ; DAGISEL-NEXT: {{ $}} + ; DAGISEL-NEXT: [[COPY3:%[0-9]+]]:gpr64common = COPY [[COPY2]] + ; DAGISEL-NEXT: INLINEASM &nop, 1 /* sideeffect attdialect */, 3866633 /* reguse:GPR64common */, [[COPY3]] + ; DAGISEL-NEXT: {{ $}} + ; DAGISEL-NEXT: bb.2.exit: + ; DAGISEL-NEXT: [[COPY4:%[0-9]+]]:gpr64noip = COPY [[LDRXui]] + ; DAGISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY1]], 2, 42, [[COPY4]], implicit-def dead $x16, implicit-def dead $x17 + ; DAGISEL-NEXT: $x0 = COPY [[PAC]] + ; DAGISEL-NEXT: RET_ReallyLR implicit $x0 + ; + ; GISEL-LABEL: name: blend_and_sign_different_bbs + ; GISEL: bb.1.entry: + ; GISEL-NEXT: successors: %bb.2(0x50000000), %bb.3(0x30000000) + ; GISEL-NEXT: liveins: $x0, $x1 + ; GISEL-NEXT: {{ $}} + ; GISEL-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; GISEL-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 + ; GISEL-NEXT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) @discvar + ; GISEL-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) @discvar :: (dereferenceable load (s64) from @discvar) + ; GISEL-NEXT: [[MOVKXi:%[0-9]+]]:gpr64noip = MOVKXi [[LDRXui]], 42, 48 + ; GISEL-NEXT: CBZX [[COPY1]], %bb.3 + ; GISEL-NEXT: B %bb.2 + ; GISEL-NEXT: {{ $}} + ; GISEL-NEXT: bb.2.next: + ; GISEL-NEXT: successors: %bb.3(0x80000000) + ; GISEL-NEXT: {{ $}} + ; GISEL-NEXT: [[COPY2:%[0-9]+]]:gpr64common = COPY [[MOVKXi]] + ; GISEL-NEXT: INLINEASM &nop, 1 /* sideeffect attdialect */, 3866633 /* reguse:GPR64common */, [[COPY2]] + ; GISEL-NEXT: {{ $}} + ; GISEL-NEXT: bb.3.exit: + ; GISEL-NEXT: [[COPY3:%[0-9]+]]:gpr64noip = COPY [[LDRXui]] + ; GISEL-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 42, [[COPY3]], implicit-def dead $x16, implicit-def dead $x17 + ; GISEL-NEXT: $x0 = COPY [[PAC]] + ; GISEL-NEXT: RET_ReallyLR implicit $x0 +entry: + %addrdisc = load i64, ptr @discvar + %disc = call i64 @llvm.ptrauth.blend(i64 %addrdisc, i64 42) + %cond.b = icmp ne i64 %cond, 0 + br i1 %cond.b, label %next, label %exit + +next: + call void asm sideeffect "nop", "r"(i64 %disc) + br label %exit + +exit: + %signed = call i64 @llvm.ptrauth.sign(i64 %addr, i32 2, i64 %disc) + ret i64 %signed +} diff --git a/llvm/test/CodeGen/AArch64/ptrauth-isel.mir b/llvm/test/CodeGen/AArch64/ptrauth-isel.mir new file mode 100644 index 0000000000000..1a155887059e3 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/ptrauth-isel.mir @@ -0,0 +1,205 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 +# RUN: llc -o - %s -mtriple arm64e-apple-darwin -verify-machineinstrs \ +# RUN: -stop-after=finalize-isel 
-start-before=finalize-isel | FileCheck %s +# RUN: llc -o - %s -mtriple aarch64-linux-gnu -mattr=+pauth -verify-machineinstrs \ +# RUN: -stop-after=finalize-isel -start-before=finalize-isel | FileCheck %s + +# This MIR-based test contains several test cases that are hard to implement +# via an LLVM IR input. Most other test cases are in ptrauth-isel.ll file. + +--- | + @globalvar = dso_local global i64 0 + + define i64 @movk_correct_blend(i64 %a, i64 %b) { + entry: + ret i64 0 + } + + define i64 @movk_wrong_shift_amount(i64 %a, i64 %b) { + entry: + ret i64 0 + } + + define i64 @movk_non_immediate_operand(i64 %a, i64 %b) { + entry: + ret i64 0 + } + + define i64 @movi64imm_immediate_operand(i64 %a) { + entry: + ret i64 0 + } + + define i64 @movi64imm_non_immediate_operand(i64 %a) { + entry: + ret i64 0 + } + + define i64 @movi32imm_immediate_operand(i64 %a) { + entry: + ret i64 0 + } + + define i64 @movi32imm_non_immediate_operand(i64 %a) { + entry: + ret i64 0 + } +... +--- +name: movk_correct_blend +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x0, $x1 + + ; CHECK-LABEL: name: movk_correct_blend + ; CHECK: liveins: $x0, $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK-NEXT: [[MOVKXi:%[0-9]+]]:gpr64noip = MOVKXi [[COPY1]], 42, 48 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64noip = COPY [[COPY1]] + ; CHECK-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 42, killed [[COPY2]], implicit-def dead $x16, implicit-def dead $x17 + ; CHECK-NEXT: $x0 = COPY [[PAC]] + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:gpr64 = COPY $x0 + %1:gpr64 = COPY $x1 + %2:gpr64noip = MOVKXi %1, 42, 48 + %3:gpr64 = PAC %0, 2, 0, killed %2, implicit-def dead $x16, implicit-def dead $x17 + $x0 = COPY %3 + RET_ReallyLR implicit $x0 +... +--- +name: movk_wrong_shift_amount +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x0, $x1 + + ; CHECK-LABEL: name: movk_wrong_shift_amount + ; CHECK: liveins: $x0, $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK-NEXT: [[MOVKXi:%[0-9]+]]:gpr64noip = MOVKXi [[COPY1]], 42, 0 + ; CHECK-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 0, killed [[MOVKXi]], implicit-def dead $x16, implicit-def dead $x17 + ; CHECK-NEXT: $x0 = COPY [[PAC]] + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:gpr64 = COPY $x0 + %1:gpr64 = COPY $x1 + %2:gpr64noip = MOVKXi %1, 42, 0 + %3:gpr64 = PAC %0, 2, 0, killed %2, implicit-def dead $x16, implicit-def dead $x17 + $x0 = COPY %3 + RET_ReallyLR implicit $x0 +... +--- +name: movk_non_immediate_operand +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x0, $x1 + + ; CHECK-LABEL: name: movk_non_immediate_operand + ; CHECK: liveins: $x0, $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK-NEXT: [[MOVKXi:%[0-9]+]]:gpr64noip = MOVKXi [[COPY1]], target-flags(aarch64-pageoff, aarch64-nc) @globalvar, 48 + ; CHECK-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 0, killed [[MOVKXi]], implicit-def dead $x16, implicit-def dead $x17 + ; CHECK-NEXT: $x0 = COPY [[PAC]] + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:gpr64 = COPY $x0 + %1:gpr64 = COPY $x1 + %2:gpr64noip = MOVKXi %1, target-flags(aarch64-pageoff, aarch64-nc) @globalvar, 48 + %3:gpr64 = PAC %0, 2, 0, killed %2, implicit-def dead $x16, implicit-def dead $x17 + $x0 = COPY %3 + RET_ReallyLR implicit $x0 +... 
+--- +name: movi64imm_immediate_operand +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x0, $x1 + + ; CHECK-LABEL: name: movi64imm_immediate_operand + ; CHECK: liveins: $x0, $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK-NEXT: [[MOVi64imm:%[0-9]+]]:gpr64noip = MOVi64imm 42 + ; CHECK-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 42, killed $noreg, implicit-def dead $x16, implicit-def dead $x17 + ; CHECK-NEXT: $x0 = COPY [[PAC]] + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:gpr64 = COPY $x0 + %1:gpr64noip = MOVi64imm 42 + %2:gpr64 = PAC %0, 2, 0, killed %1, implicit-def dead $x16, implicit-def dead $x17 + $x0 = COPY %2 + RET_ReallyLR implicit $x0 +... +--- +name: movi64imm_non_immediate_operand +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x0, $x1 + + ; CHECK-LABEL: name: movi64imm_non_immediate_operand + ; CHECK: liveins: $x0, $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK-NEXT: [[MOVi64imm:%[0-9]+]]:gpr64noip = MOVi64imm target-flags(aarch64-pageoff, aarch64-nc) @globalvar + ; CHECK-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 0, killed [[MOVi64imm]], implicit-def dead $x16, implicit-def dead $x17 + ; CHECK-NEXT: $x0 = COPY [[PAC]] + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:gpr64 = COPY $x0 + %1:gpr64noip = MOVi64imm target-flags(aarch64-pageoff, aarch64-nc) @globalvar + %2:gpr64 = PAC %0, 2, 0, killed %1, implicit-def dead $x16, implicit-def dead $x17 + $x0 = COPY %2 + RET_ReallyLR implicit $x0 +... +--- +name: movi32imm_immediate_operand +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x0, $x1 + + ; CHECK-LABEL: name: movi32imm_immediate_operand + ; CHECK: liveins: $x0, $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 42 + ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64noip = SUBREG_TO_REG 0, killed [[MOVi32imm]], %subreg.sub_32 + ; CHECK-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 42, killed $noreg, implicit-def dead $x16, implicit-def dead $x17 + ; CHECK-NEXT: $x0 = COPY [[PAC]] + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:gpr64 = COPY $x0 + %1:gpr32 = MOVi32imm 42 + %2:gpr64noip = SUBREG_TO_REG 0, killed %1, %subreg.sub_32 + %3:gpr64 = PAC %0, 2, 0, killed %2, implicit-def dead $x16, implicit-def dead $x17 + $x0 = COPY %3 + RET_ReallyLR implicit $x0 +... +--- +name: movi32imm_non_immediate_operand +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x0, $x1 + + ; CHECK-LABEL: name: movi32imm_non_immediate_operand + ; CHECK: liveins: $x0, $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm target-flags(aarch64-pageoff, aarch64-nc) @globalvar + ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64noip = SUBREG_TO_REG 0, killed [[MOVi32imm]], %subreg.sub_32 + ; CHECK-NEXT: [[PAC:%[0-9]+]]:gpr64 = PAC [[COPY]], 2, 0, killed [[SUBREG_TO_REG]], implicit-def dead $x16, implicit-def dead $x17 + ; CHECK-NEXT: $x0 = COPY [[PAC]] + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:gpr64 = COPY $x0 + %1:gpr32 = MOVi32imm target-flags(aarch64-pageoff, aarch64-nc) @globalvar + %2:gpr64noip = SUBREG_TO_REG 0, killed %1, %subreg.sub_32 + %3:gpr64 = PAC %0, 2, 0, killed %2, implicit-def dead $x16, implicit-def dead $x17 + $x0 = COPY %3 + RET_ReallyLR implicit $x0 +...
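Note on the intended end-to-end lowering (an illustrative sketch, not taken verbatim from the patch): with this change, IR such as

  %disc   = call i64 @llvm.ptrauth.blend(i64 %addrdisc, i64 42)
  %signed = call i64 @llvm.ptrauth.sign(i64 %addr, i32 2, i64 %disc)

is expected to select to a single PAC pseudo that carries the immediate modifier (42) and the address part of the blend as separate operands, and AArch64AsmPrinter::emitPtrauthSign is then expected to expand that pseudo into the discriminator computation immediately followed by the signing instruction, roughly:

  ; Register assignment below is illustrative only.
  mov   x17, x1            ; copy the address discriminator into a scratch register
  movk  x17, #42, lsl #48  ; re-materialize the immediate modifier right next to the PAC
  pacda x0, x17            ; key 2 is DA; a zero discriminator would use pacdza instead

The scratch register may be x16 or x17, and the address-discriminator register may be reused in place when it is killed; the point of keeping the components separate until AsmPrinter time is that substituting only the runtime address-discriminator value cannot change the immediate modifier used for signing.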