diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp index d38db71a65797..f1f7378171bdd 100644 --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -2796,7 +2796,7 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, Tmp = Builder.CreateIntToPtr(Tmp, TruncTy); } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) { uint64_t TmpSize = - CGM.getDataLayout().getTypeSizeInBits(Tmp->getType()); + CGM.getDataLayout().getIndexTypeSizeInBits(Tmp->getType()); Tmp = Builder.CreatePtrToInt( Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize)); Tmp = Builder.CreateTrunc(Tmp, TruncTy); diff --git a/clang/test/CodeGen/cheri/cheri-asm-ptrtoint.c b/clang/test/CodeGen/cheri/cheri-asm-ptrtoint.c new file mode 100644 index 0000000000000..ab5152cc705b4 --- /dev/null +++ b/clang/test/CodeGen/cheri/cheri-asm-ptrtoint.c @@ -0,0 +1,24 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 +// RUN: %riscv32_cheri_purecap_cc1 -disable-O0-optnone %s -emit-llvm -o - | opt -passes=mem2reg -S -o - | FileCheck %s --check-prefix=RV32 +// RUN: %riscv64_cheri_purecap_cc1 -disable-O0-optnone %s -emit-llvm -o - | opt -passes=mem2reg -S -o - | FileCheck %s --check-prefix=RV64 + +// RV32-LABEL: define dso_local i32 @ptr_to_int +// RV32-SAME: (ptr addrspace(200) noundef [[P:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] { +// RV32-NEXT: entry: +// RV32-NEXT: [[TMP0:%.*]] = call ptr addrspace(200) asm "", "=C,0"(ptr addrspace(200) [[P]]) #[[ATTR1:[0-9]+]], !srcloc [[META6:![0-9]+]] +// RV32-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(200) [[TMP0]] to i32 +// RV32-NEXT: ret i32 [[TMP1]] +// +// RV64-LABEL: define dso_local signext i32 @ptr_to_int +// RV64-SAME: (ptr addrspace(200) noundef [[P:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] { +// RV64-NEXT: entry: +// RV64-NEXT: [[TMP0:%.*]] = call ptr addrspace(200) asm "", "=C,0"(ptr addrspace(200) [[P]]) #[[ATTR1:[0-9]+]], !srcloc [[META6:![0-9]+]] +// RV64-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(200) [[TMP0]] to i64 +// RV64-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 +// RV64-NEXT: ret i32 [[TMP2]] +// +int ptr_to_int(void *p) { + int i; + asm ("" : "=C"(i) : "0"(p)); + return i; +} diff --git a/lld/ELF/Arch/Cheri.cpp b/lld/ELF/Arch/Cheri.cpp index 0b18824f0d68a..434005bdffac1 100644 --- a/lld/ELF/Arch/Cheri.cpp +++ b/lld/ELF/Arch/Cheri.cpp @@ -312,14 +312,16 @@ static uint64_t getTargetSize(Ctx &ctx, const CheriCapRelocLocation &location, return targetSize; } -template -struct CaptablePermissions { - static const uint64_t function = UINT64_C(1) - << ((sizeof(typename ELFT::uint) * 8) - 1); - static const uint64_t readOnly = UINT64_C(1) - << ((sizeof(typename ELFT::uint) * 8) - 2); - static const uint64_t indirect = UINT64_C(1) - << ((sizeof(typename ELFT::uint) * 8) - 3); +template struct CapRelocPermission { + static constexpr uint64_t permissionBit(uint64_t bit) { + return UINT64_C(1) << ((sizeof(typename ELFT::uint) * 8) - bit); + } + + // clang-format off + static const uint64_t function = permissionBit(1); + static const uint64_t readOnly = permissionBit(2); + static const uint64_t indirect = permissionBit(3); + // clang-format on }; template @@ -372,14 +374,14 @@ void CheriCapRelocsSection::writeToImpl(uint8_t *buf) { uint64_t permissions = 0; // Fow now Function implies ReadOnly so don't add the flag if (isFunc || isGnuIFunc) { - permissions |= CaptablePermissions::function; + permissions |= CapRelocPermission::function; if (isGnuIFunc) - permissions |= 
CaptablePermissions::indirect; + permissions |= CapRelocPermission::indirect; } else if (os) { assert(!isTls); // if ((OS->getPhdrFlags() & PF_W) == 0) { if (((os->flags & SHF_WRITE) == 0) || isRelroSection(ctx, os)) { - permissions |= CaptablePermissions::readOnly; + permissions |= CapRelocPermission::readOnly; } else if (os->flags & SHF_EXECINSTR) { #if 0 // This generates a load of annoying spurious warnings with CHERIoT. @@ -690,8 +692,8 @@ uint64_t MipsCheriCapTableSection::assignIndices(uint64_t startIndex, // relocation instead which allows the runtime linker to create non-unique // plt stubs. RelType elfCapabilityReloc = it.second.usedInCallExpr - ? *ctx.target->cheriCapCallRel - : *ctx.target->cheriCapRel; + ? *ctx.target->symbolicCapCallRel + : *ctx.target->symbolicCapRel; // All capability call relocations should end up in the pltrel section // rather than the normal relocation section to make processing of PLT // relocations in RTLD more efficient. @@ -903,7 +905,7 @@ static bool needsCheriMipsTrampoline(Ctx &ctx, RelType type, if (ctx.arg.emachine != EM_MIPS) return false; - if (!sym.isFunc() || type == *ctx.target->cheriCapCallRel) + if (!sym.isFunc() || type == *ctx.target->symbolicCapCallRel) return false; // In static binaries we do not need PLT stubs for function pointers since diff --git a/lld/ELF/Arch/Mips.cpp b/lld/ELF/Arch/Mips.cpp index 54b68569a7e79..c12f98e3c75fd 100644 --- a/lld/ELF/Arch/Mips.cpp +++ b/lld/ELF/Arch/Mips.cpp @@ -48,8 +48,8 @@ template MIPS::MIPS(Ctx &ctx) : TargetInfo(ctx) { pltHeaderSize = 32; copyRel = R_MIPS_COPY; pltRel = R_MIPS_JUMP_SLOT; - cheriCapRel = R_MIPS_CHERI_CAPABILITY; - cheriCapCallRel = R_MIPS_CHERI_CAPABILITY_CALL; + symbolicCapRel = R_MIPS_CHERI_CAPABILITY; + symbolicCapCallRel = R_MIPS_CHERI_CAPABILITY_CALL; needsThunks = true; // Set `sigrie 1` as a trap instruction. @@ -237,7 +237,7 @@ RelExpr MIPS::getRelExpr(RelType type, const Symbol &s, } template RelType MIPS::getDynRel(RelType type) const { - if (type == symbolicRel || type == cheriCapRel) + if (type == symbolicRel || type == symbolicCapRel) return type; return R_MIPS_NONE; } diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp index 196027d693935..92a22fb6bb9d6 100644 --- a/lld/ELF/Arch/RISCV.cpp +++ b/lld/ELF/Arch/RISCV.cpp @@ -127,7 +127,7 @@ RISCV::RISCV(Ctx &ctx) : TargetInfo(ctx) { pltRel = R_RISCV_JUMP_SLOT; relativeRel = R_RISCV_RELATIVE; iRelativeRel = R_RISCV_IRELATIVE; - cheriCapRel = R_RISCV_CHERI_CAPABILITY; + symbolicCapRel = R_RISCV_CHERI_CAPABILITY; if (ctx.arg.is64) { symbolicRel = R_RISCV_64; tlsModuleIndexRel = R_RISCV_TLS_DTPMOD64; @@ -140,7 +140,7 @@ RISCV::RISCV(Ctx &ctx) : TargetInfo(ctx) { tlsGotRel = R_RISCV_TLS_TPREL32; } if (ctx.arg.isCheriAbi) - gotRel = *cheriCapRel; + gotRel = *symbolicCapRel; else gotRel = symbolicRel; tlsDescRel = R_RISCV_TLSDESC; @@ -308,7 +308,7 @@ void RISCV::writePlt(uint8_t *buf, const Symbol &sym, } RelType RISCV::getDynRel(RelType type) const { - return type == symbolicRel || type == cheriCapRel + return type == symbolicRel || type == symbolicCapRel ? 
type : static_cast(R_RISCV_NONE); } diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp index b0003c5051703..447bc25fdf20b 100644 --- a/lld/ELF/Relocations.cpp +++ b/lld/ELF/Relocations.cpp @@ -950,12 +950,12 @@ static void addPltEntry(Ctx &ctx, PltSection &plt, GotPltSection &gotPlt, if (!sym.isPreemptible) { addRelativeCapabilityRelocation(ctx, gotPlt, sym.getGotPltOffset(ctx), &sym, 0, R_ABS_CAP, - *ctx.target->cheriCapRel); + *ctx.target->symbolicCapRel); return; } addRelativeCapabilityRelocation(ctx, gotPlt, sym.getGotPltOffset(ctx), &plt, - 0, R_ABS_CAP, *ctx.target->cheriCapRel); + 0, R_ABS_CAP, *ctx.target->symbolicCapRel); } rel.addReloc({type, &gotPlt, sym.getGotPltOffset(ctx), @@ -978,7 +978,9 @@ void elf::addGotEntry(Ctx &ctx, Symbol &sym) { } RelType type = - ctx.arg.isCheriAbi ? *ctx.target->cheriCapRel : ctx.target->symbolicRel; + + ctx.arg.isCheriAbi ? *ctx.target->symbolicCapRel + : ctx.target->symbolicRel; // Otherwise, the value is either a link-time constant or the load base // plus a constant. For CHERI it always requires run-time initialisation, @@ -1244,7 +1246,8 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset, if (canWrite) { RelType rel = ctx.target->getDynRel(type); if (oneof(expr) || - ((rel == ctx.target->symbolicRel || rel == ctx.target->cheriCapRel) && + ((rel == ctx.target->symbolicRel || + rel == ctx.target->symbolicCapRel) && !sym.isPreemptible)) { addRelativeReloc(ctx, *sec, offset, sym, addend, expr, type); return; diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp index 5f71e3bbf43e1..d6d8eec622f16 100644 --- a/lld/ELF/SyntheticSections.cpp +++ b/lld/ELF/SyntheticSections.cpp @@ -1749,8 +1749,8 @@ RelocationBaseSection::RelocationBaseSection(Ctx &ctx, StringRef name, void RelocationBaseSection::addSymbolReloc( RelType dynType, InputSectionBase &isec, uint64_t offsetInSec, Symbol &sym, int64_t addend, std::optional addendRelType) { - bool isCap = dynType == ctx.target->cheriCapRel || - dynType == ctx.target->cheriCapCallRel; + bool isCap = dynType == ctx.target->symbolicCapRel || + dynType == ctx.target->symbolicCapCallRel; if (isCap && sym.isFunc() && addend != 0) { auto diag = Warn(ctx); diag << "capability relocation with non-zero addend (0x" diff --git a/lld/ELF/Target.h b/lld/ELF/Target.h index 45239d320f866..329811326945c 100644 --- a/lld/ELF/Target.h +++ b/lld/ELF/Target.h @@ -138,8 +138,8 @@ class TargetInfo { RelType tlsOffsetRel = 0; std::optional absPointerRel; // TODO: remove the optional std::optional sizeRel; - std::optional cheriCapRel; - std::optional cheriCapCallRel; + std::optional symbolicCapRel; + std::optional symbolicCapCallRel; unsigned gotEntrySize = ctx.arg.wordsize; unsigned pltEntrySize = 0; unsigned pltHeaderSize = 0; diff --git a/lld/test/ELF/cheri/exception-table.ll b/lld/test/ELF/cheri/exception-table.ll index a104e5f70dbfc..a50e551256564 100644 --- a/lld/test/ELF/cheri/exception-table.ll +++ b/lld/test/ELF/cheri/exception-table.ll @@ -8,19 +8,19 @@ ; RUN: llvm-readobj -r %t/riscv.o | FileCheck %s --check-prefix=RV64-OBJ-RELOCS ;; Should have two relocations against a local alias for _Z4testll ; MIPS-OBJ-RELOCS: Section ({{.+}}) .rela.gcc_except_table { -; MIPS-OBJ-RELOCS-NEXT: R_MIPS_CHERI_CAPABILITY/R_MIPS_NONE/R_MIPS_NONE .Llpad0 0x0{{$}} -; MIPS-OBJ-RELOCS-NEXT: R_MIPS_CHERI_CAPABILITY/R_MIPS_NONE/R_MIPS_NONE .Llpad1 0x0{{$}} -; MIPS-OBJ-RELOCS-NEXT: R_MIPS_CHERI_CAPABILITY/R_MIPS_NONE/R_MIPS_NONE .Llpad2 0x0{{$}} +; MIPS-OBJ-RELOCS-NEXT: 
R_MIPS_CHERI_CAPABILITY/R_MIPS_NONE/R_MIPS_NONE .L_Z4testll$local 0x80 +; MIPS-OBJ-RELOCS-NEXT: R_MIPS_CHERI_CAPABILITY/R_MIPS_NONE/R_MIPS_NONE .L_Z4testll$local 0x60 +; MIPS-OBJ-RELOCS-NEXT: R_MIPS_CHERI_CAPABILITY/R_MIPS_NONE/R_MIPS_NONE .L_Z5test2ll$local 0x5C ; MIPS-OBJ-RELOCS-NEXT: R_MIPS_PC32/R_MIPS_NONE/R_MIPS_NONE .L_ZTIl.DW.stub 0x0 -; MIPS-OBJ-RELOCS-NEXT: R_MIPS_CHERI_CAPABILITY/R_MIPS_NONE/R_MIPS_NONE .Llpad3 0x0{{$}} +; MIPS-OBJ-RELOCS-NEXT: R_MIPS_CHERI_CAPABILITY/R_MIPS_NONE/R_MIPS_NONE .L_Z9test_weakll$local 0x34 ; MIPS-OBJ-RELOCS-NEXT: } ; RV64-OBJ-RELOCS: Section ({{.+}}) .rela.gcc_except_table { -; RV64-OBJ-RELOCS-NEXT: 0x20 R_RISCV_CHERI_CAPABILITY .Llpad0 0x0{{$}} -; RV64-OBJ-RELOCS-NEXT: 0x40 R_RISCV_CHERI_CAPABILITY .Llpad1 0x0{{$}} -; RV64-OBJ-RELOCS-NEXT: 0x80 R_RISCV_CHERI_CAPABILITY .Llpad2 0x0{{$}} +; RV64-OBJ-RELOCS-NEXT: 0x20 R_RISCV_CHERI_CAPABILITY .L_Z4testll$local 0x5C +; RV64-OBJ-RELOCS-NEXT: 0x40 R_RISCV_CHERI_CAPABILITY .L_Z4testll$local 0x48 +; RV64-OBJ-RELOCS-NEXT: 0x80 R_RISCV_CHERI_CAPABILITY .L_Z5test2ll$local 0x44 ; RV64-OBJ-RELOCS-NEXT: 0xA4 R_RISCV_ADD32 .L_ZTIl.DW.stub 0x0 ; RV64-OBJ-RELOCS-NEXT: 0xA4 R_RISCV_SUB32 .L0 0x0 -; RV64-OBJ-RELOCS-NEXT: 0xC0 R_RISCV_CHERI_CAPABILITY .Llpad3 0x0{{$}} +; RV64-OBJ-RELOCS-NEXT: 0xC0 R_RISCV_CHERI_CAPABILITY .L_Z9test_weakll$local 0x1C ; RV64-OBJ-RELOCS-NEXT: } ;; This should work with both -z text and -z notext diff --git a/llvm/include/llvm/BinaryFormat/ELFRelocs/RISCV.def b/llvm/include/llvm/BinaryFormat/ELFRelocs/RISCV.def index b052ae69af644..32a84d6a96c81 100644 --- a/llvm/include/llvm/BinaryFormat/ELFRelocs/RISCV.def +++ b/llvm/include/llvm/BinaryFormat/ELFRelocs/RISCV.def @@ -63,7 +63,7 @@ ELF_RELOC(R_RISCV_TLSDESC_CALL, 65) ELF_RELOC(R_RISCV_VENDOR, 191) // ELF_RELOC(R_RISCV_CUSTOM192, 192) // ELF_RELOC(R_RISCV_CUSTOM193, 193) -ELF_RELOC(R_RISCV_CUSTOM194, 194) +// ELF_RELOC(R_RISCV_CUSTOM194, 194) ELF_RELOC(R_RISCV_CUSTOM195, 195) // ELF_RELOC(R_RISCV_CUSTOM196, 196) // ELF_RELOC(R_RISCV_CUSTOM197, 197) @@ -129,7 +129,7 @@ ELF_RELOC(R_RISCV_CUSTOM255, 255) // CHERI relocations ELF_RELOC(R_RISCV_CHERI_CAPTAB_PCREL_HI20, 192) ELF_RELOC(R_RISCV_CHERI_CAPABILITY, 193) -// 194 reserved +ELF_RELOC(R_RISCV_FUNC_RELATIVE, 194) // 195 reserved ELF_RELOC(R_RISCV_CHERI_TPREL_CINCOFFSET, 196) // Deprecated ELF_RELOC(R_RISCV_CHERI_TLS_IE_CAPTAB_PCREL_HI20, 197) // Deprecated diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h index 09ea8a3ef2502..ca25a66c72b9c 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAG.h +++ b/llvm/include/llvm/CodeGen/SelectionDAG.h @@ -2072,6 +2072,14 @@ class SelectionDAG { /// We use this predicate to simplify operations downstream. LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const; + /// Return true if the sign bit of Op is known to be one. + /// We use this predicate to simplify operations downstream. + bool SignBitIsOne(SDValue Op, unsigned Depth = 0) const; + + /// Return true if the sign bits of N0 and N1 are known to be the same. + /// We use this predicate to simplify operations downstream. + bool SignBitIsSame(SDValue N0, SDValue N1, unsigned Depth = 0) const; + /// Return true if 'Op & Mask' is known to be zero. We /// use this predicate to simplify operations downstream. Op and Mask are /// known to be the same type. 
diff --git a/llvm/include/llvm/MC/MCStreamer.h b/llvm/include/llvm/MC/MCStreamer.h index f41544e74201e..a9f5003c802d9 100644 --- a/llvm/include/llvm/MC/MCStreamer.h +++ b/llvm/include/llvm/MC/MCStreamer.h @@ -778,10 +778,12 @@ class LLVM_ABI MCStreamer { // MCAsmInfo only knowns about the triple which is not enough // Emit the expression \p Value into the output as a CHERI capability - void EmitCheriCapability(const MCSymbol *Value, int64_t Addend, - unsigned CapSize, SMLoc Loc = SMLoc()); - void EmitCheriCapability(const MCSymbol *Value, const MCExpr *Addend, - unsigned CapSize, SMLoc Loc = SMLoc()); + virtual void emitCheriCapability(const MCExpr *Value, unsigned CapSize, + SMLoc Loc = SMLoc()); + + /// Special case of emitCheriCapability that avoids the client having to pass + /// in a MCExpr for MCSymbols. + void emitSymbolCheriCapability(const MCSymbol *Sym, unsigned CapSize); // Emit \p Value as an untagged capability-size value virtual void emitCheriIntcap(int64_t Value, unsigned CapSize, @@ -1130,11 +1132,8 @@ class LLVM_ABI MCStreamer { /// Return the end symbol generated inside, the caller needs to emit it. virtual MCSymbol *emitDwarfUnitLength(const Twine &Prefix, const Twine &Comment); -protected: - virtual void EmitCheriCapabilityImpl(const MCSymbol *Value, - const MCExpr *Addend, unsigned CapSize, - SMLoc Loc = SMLoc()); +protected: /// Target-independent untagged CHERI capability virtual void emitCheriIntcapGeneric(const MCExpr *Expr, unsigned CapSize, SMLoc Loc); diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index fa89c9f9ef9d4..ccd8b7172de89 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -2655,8 +2655,8 @@ bool AsmPrinter::doFinalization(Module &M) { for (const auto &Stub : Stubs) { OutStreamer->emitLabel(Stub.first); if (DL.isFatPointer(AS)) - OutStreamer->EmitCheriCapability(Stub.second.getPointer(), nullptr, - Size); + OutStreamer->emitSymbolCheriCapability(Stub.second.getPointer(), + Size); else OutStreamer->emitSymbolValue(Stub.second.getPointer(), Size); } @@ -4149,18 +4149,25 @@ static void emitGlobalConstantCHERICap(const DataLayout &DL, const Constant *CV, return; } GlobalValue *GV; - APInt Addend; - if (IsConstantOffsetFromGlobal(const_cast(CV), GV, Addend, DL, + APInt Offset; + MCContext &Ctx = AP.OutContext; + if (IsConstantOffsetFromGlobal(const_cast(CV), GV, Offset, DL, true)) { - AP.OutStreamer->EmitCheriCapability(AP.getSymbol(GV), Addend.getSExtValue(), - CapWidth); + const MCExpr *CapExpr = MCSymbolRefExpr::create(AP.getSymbol(GV), Ctx); + int64_t Addend = Offset.getSExtValue(); + if (Addend != 0) + CapExpr = MCBinaryExpr::createAdd( + CapExpr, MCConstantExpr::create(Addend, Ctx), Ctx); + AP.OutStreamer->emitCheriCapability(CapExpr, CapWidth); return; } else if (const MCSymbolRefExpr *SRE = dyn_cast(Expr)) { if (auto BA = dyn_cast(CV)) { // For block addresses we emit `.chericap FN+(.LtmpN - FN)` auto FnStart = AP.getSymbol(BA->getFunction()); - const MCExpr *DiffToStart = MCBinaryExpr::createSub(SRE, MCSymbolRefExpr::create(FnStart, AP.OutContext), AP.OutContext); - AP.OutStreamer->EmitCheriCapability(FnStart, DiffToStart, CapWidth); + const MCExpr *Start = MCSymbolRefExpr::create(FnStart, Ctx); + const MCExpr *DiffToStart = MCBinaryExpr::createSub(SRE, Start, Ctx); + const MCExpr *CapExpr = MCBinaryExpr::createAdd(Start, DiffToStart, Ctx); + AP.OutStreamer->emitCheriCapability(CapExpr, CapWidth); return; } // Emit capability for label 
whose address is stored in a global variable @@ -4169,7 +4176,7 @@ static void emitGlobalConstantCHERICap(const DataLayout &DL, const Constant *CV, report_fatal_error( "Cannot emit a global .chericap referring to a temporary since this " "will result in the wrong value at runtime!"); - AP.OutStreamer->EmitCheriCapability(&SRE->getSymbol(), nullptr, CapWidth); + AP.OutStreamer->emitSymbolCheriCapability(&SRE->getSymbol(), CapWidth); return; } } diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp index aa91f06c9f693..a8b3a8199a17b 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp @@ -203,21 +203,22 @@ void AsmPrinter::emitCallSiteValue(uint64_t Value, unsigned Encoding) const { void AsmPrinter::emitCallSiteCheriCapability(const MCSymbol *Hi, const MCSymbol *Lo) const { + assert(CurrentFnBeginLocal && "Missing local function entry alias for EH!"); + + const TargetLoweringObjectFile &TLOF = getObjFileLowering(); + + // Get the Hi-Lo expression. We use (and need) Lo since the offset needs to + // be a constant expression, whereas CurrentFnSym is preemptible. + const MCExpr *DiffToStart = MCBinaryExpr::createSub( + MCSymbolRefExpr::create(Hi, OutContext), + MCSymbolRefExpr::create(Lo, OutContext), OutContext); // Note: we cannot use Lo here since that is an assembler-local symbol and - // this would result in EmitCheriCapability() creating a relocation against + // this would result in emitCheriCapability() creating a relocation against // section plus offset rather than function + offset. We need the right // bounds and permissions info and need to use a non-preemptible alias. - assert(CurrentFnBeginLocal && "Missing local function entry alias for EH!"); - // Ensure that CurrentFnBeginLocal ends up in the symbol table so that ld.lld - // can find the surrounding function even if the actual function is not used. - // This happens with weak functions where the unused function's landing pads - // would otherwise no longer have a valid surrounding symbol. While this does - // not matter as they are unused, it does trigger ld.lld warnings. Always - // emitting the local symbol also ensures we can find a valid surrounding and - // non-preemptible symbol with a size set. - CurrentFnBeginLocal->setUsedInReloc(); - OutStreamer->EmitCheriCapability( - Hi, (int64_t)0, getObjFileLowering().getCheriCapabilitySize(TM)); + const MCExpr *Expr = MCSymbolRefExpr::create(CurrentFnBeginLocal, OutContext); + Expr = MCBinaryExpr::createAdd(Expr, DiffToStart, OutContext); + OutStreamer->emitCheriCapability(Expr, TLOF.getCheriCapabilitySize(TM)); } //===----------------------------------------------------------------------===// diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp index 1ab19ccf4e064..3b9c6f210d92d 100644 --- a/llvm/lib/CodeGen/MachineFunction.cpp +++ b/llvm/lib/CodeGen/MachineFunction.cpp @@ -831,13 +831,7 @@ void MachineFunction::addInvoke(MachineBasicBlock *LandingPad, } MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) { - // For the purecap ABIs we create a relocation against this symbol, so ensure - // that there is a named symbol in the object file to make objdump/readelf - // output more easily parseable. We avoid doing this unconditonally as it - // would affect many upstream tests. - MCSymbol *LandingPadLabel = Ctx.getAsmInfo()->isCheriPurecapABI() - ? 
Ctx.createNamedTempSymbol("lpad") - : Ctx.createTempSymbol(); + MCSymbol *LandingPadLabel = Ctx.createTempSymbol(); LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad); LP.LandingPadLabel = LandingPadLabel; diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index af6b74563b06c..db5ebca75fa5b 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -2721,6 +2721,12 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) { if (isNullConstant(N1)) return N0; + auto GetPTRADD2 = [this, &DL](SDValue &Base, SDValue &Off1, SDValue &Off2) { + SDValue Base2 = DAG.getMemBasePlusOffset(Base, Off1, DL); + AddToWorklist(Base2.getNode()); + return DAG.getMemBasePlusOffset(Base2, Off2, DL); + }; + // fold (ptradd 0, x) -> x if (PtrVT == IntVT && isNullConstant(N0)) return N1; @@ -2779,7 +2785,6 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) { // For now each architecture that wants this fold must implement it in the // target-specific code (see e.g. SITargetLowering::performPtrAddCombine) - // Reassociate: (ptradd (ptradd x, y), z) -> (ptradd x, (add y, z)) if: // * x is a null pointer; or // * the add can be constant-folded; or @@ -2792,6 +2797,14 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) { // patterns. Once we represent that with PTRMASK that will be less of a // concern, though we might still want to detect code not using the builtins // and canonicalise it to a PTRMASK. + // + // Commute: (ptradd (ptradd x, y), z) -> (ptradd (ptradd x, z), y) if: + // * y and z have the same sign and y is a constant. + // + // This allows immediate addressing modes to be used. Note that we need to be + // careful to ensure we don't transiently become unrepresentable if the + // original DAG does not already do so, and this is the case if both PTRADDs + // have the same sign. if (N0.getOpcode() == ISD::PTRADD && !reassociationCanBreakAddressingModePattern(ISD::PTRADD, DL, N, N0, N1)) { SDValue X = N0.getOperand(0); @@ -2822,6 +2835,27 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) { (N0.hasOneUse() && Z.hasOneUse() && !DAG.isConstantIntBuildVectorOrConstantInt(Z))) return DAG.getMemBasePlusOffset(X, Add, DL); + if (DAG.SignBitIsSame(Y, Z) && DAG.isConstantIntBuildVectorOrConstantInt(Y)) + return GetPTRADD2(X, Z, Y); + } + + // Transform: (ptradd x, (add y, z)) -> (ptradd (ptradd x, y), z) if: + // * both y and z have the same sign and z is a constant. + // + // Transform: (ptradd x, (add y, z)) -> (ptradd (ptradd x, z), y) if: + // * both y and z have the same sign and y is a constant. + // + // As above, this allows for immediate addressing modes. + if (N1.getOpcode() == ISD::ADD) { + SDValue X = N0; + SDValue Y = N1.getOperand(0); + SDValue Z = N1.getOperand(1); + if (DAG.SignBitIsSame(Y, Z)) { + if (DAG.isConstantIntBuildVectorOrConstantInt(Y)) + return GetPTRADD2(X, Z, Y); + if (DAG.isConstantIntBuildVectorOrConstantInt(Z)) + return GetPTRADD2(X, Y, Z); + } } return SDValue(); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 85ffaa9cfb3df..74184df1b0662 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -2964,6 +2964,20 @@ bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); } +/// SignBitIsOne - Return true if the sign bit of Op is known to be one. 
We +/// use this predicate to simplify operations downstream. +bool SelectionDAG::SignBitIsOne(SDValue Op, unsigned Depth) const { + unsigned BitWidth = Op.getScalarValueSizeInBits(); + return MaskedValueIsAllOnes(Op, APInt::getSignMask(BitWidth), Depth); +} + +/// SignBitIsSame - Return true if the sign bits of N0 and N1 are known to be +/// the same. We use this predicate to simplify operations downstream. +bool SelectionDAG::SignBitIsSame(SDValue N0, SDValue N1, unsigned Depth) const { + return (SignBitIsZero(N0, Depth) && SignBitIsZero(N1, Depth)) || + (SignBitIsOne(N0, Depth) && SignBitIsOne(N1, Depth)); +} + /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use /// this predicate to simplify operations downstream. Mask is known to be zero /// for bits that V cannot have. diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp index ce11fbd46fe2b..5d77ca1c0c594 100644 --- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp +++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp @@ -428,7 +428,7 @@ void TargetLoweringObjectFileELF::emitPersonalityValueImpl( unsigned AS = DL.getProgramAddressSpace(); unsigned Size = DL.getPointerSize(AS); if (DL.isFatPointer(DL.getProgramAddressSpace())) { - Streamer.EmitCheriCapability(Sym, nullptr, Size); + Streamer.emitSymbolCheriCapability(Sym, Size); } else { Streamer.emitSymbolValue(Sym, Size); } diff --git a/llvm/lib/MC/MCAsmStreamer.cpp b/llvm/lib/MC/MCAsmStreamer.cpp index 820a8941db15b..9cc96131ba9e0 100644 --- a/llvm/lib/MC/MCAsmStreamer.cpp +++ b/llvm/lib/MC/MCAsmStreamer.cpp @@ -414,8 +414,8 @@ class MCAsmStreamer final : public MCStreamer { void emitBundleLock(bool AlignToEnd) override; void emitBundleUnlock() override; - void EmitCheriCapabilityImpl(const MCSymbol *Symbol, const MCExpr *Addend, - unsigned CapSize, SMLoc Loc = SMLoc()) override; + virtual void emitCheriCapability(const MCExpr *Value, unsigned CapSize, + SMLoc Loc = SMLoc()) override; void emitCheriIntcap(const MCExpr *Expr, unsigned CapSize, SMLoc Loc = SMLoc()) override; @@ -2548,24 +2548,15 @@ void MCAsmStreamer::emitBundleUnlock() { EmitEOL(); } -void MCAsmStreamer::EmitCheriCapabilityImpl(const MCSymbol *Symbol, - const MCExpr *Addend, - unsigned CapSize, SMLoc Loc) { +void MCAsmStreamer::emitCheriCapability(const MCExpr *Value, unsigned CapSize, + SMLoc Loc) { OS << "\t.chericap\t"; - Symbol->print(OS, MAI); - // Avoid parens,unary minus, and zero for constants: - assert(Addend); - if (const MCConstantExpr *CE = dyn_cast(Addend)) { - int64_t Offset = CE->getValue(); - if (Offset > 0) - OS << "+" << Offset; - else if (Offset < 0) - OS << Offset; + if (MCTargetStreamer *TS = getTargetStreamer()) { + TS->emitValue(Value); } else { - OS << " + "; - MAI->printExpr(OS, *Addend); + MAI->printExpr(OS, *Value); + EmitEOL(); } - EmitEOL(); } void MCAsmStreamer::emitCheriIntcap(const MCExpr *Expr, unsigned CapSize, diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp index 13863ee7fd64e..3816d424a68bc 100644 --- a/llvm/lib/MC/MCParser/AsmParser.cpp +++ b/llvm/lib/MC/MCParser/AsmParser.cpp @@ -5849,39 +5849,11 @@ bool AsmParser::parseDirectiveCheriCap(SMLoc DirectiveLoc) { int64_t Offset = 0; unsigned CapSize = getTargetParser().getCheriCapabilitySize(); // Allow .chericap 0x123456 to create an untagged uintcap_t - if (SymExpr->evaluateAsAbsolute(Offset)) { + if (SymExpr->evaluateAsAbsolute(Offset, getStreamer().getAssemblerPtr())) 
getStreamer().emitCheriIntcap(Offset, CapSize, ExprLoc); - } else { - const MCSymbolRefExpr *SRE = nullptr; - if (const MCBinaryExpr *BE = dyn_cast(SymExpr)) { - const MCConstantExpr *CE = nullptr; - bool Neg = false; - switch (BE->getOpcode()) { - case MCBinaryExpr::Sub: - Neg = true; - LLVM_FALLTHROUGH; - case MCBinaryExpr::Add: - CE = dyn_cast(BE->getRHS()); - break; - default: - break; - } + else + getStreamer().emitCheriCapability(SymExpr, CapSize, ExprLoc); - SRE = dyn_cast(BE->getLHS()); - if (!SRE || !CE) - return Error(ExprLoc, "must be sym[+const]"); - Offset = CE->getValue(); - if (Neg) - Offset = -Offset; - } else { - SRE = dyn_cast(SymExpr); - if (!SRE) - return Error(ExprLoc, "must be sym[+const]"); - Offset = 0; - } - const MCSymbol &Symbol = SRE->getSymbol(); - getStreamer().EmitCheriCapability(&Symbol, Offset, CapSize, ExprLoc); - } if (parseToken(AsmToken::EndOfStatement, "expected end of statement")) return true; return false; diff --git a/llvm/lib/MC/MCStreamer.cpp b/llvm/lib/MC/MCStreamer.cpp index 037ea853df032..3f128f343c99a 100644 --- a/llvm/lib/MC/MCStreamer.cpp +++ b/llvm/lib/MC/MCStreamer.cpp @@ -195,25 +195,15 @@ void MCStreamer::emitSymbolValue(const MCSymbol *Sym, unsigned Size, emitCOFFSecRel32(Sym, /*Offset=*/0); } -void MCStreamer::EmitCheriCapability(const MCSymbol *Value, - const MCExpr *Addend, unsigned CapSize, +void MCStreamer::emitCheriCapability(const MCExpr *Value, unsigned CapSize, SMLoc Loc) { - if (!Addend) { - Addend = MCConstantExpr::create(0, Context); - } - EmitCheriCapabilityImpl(Value, Addend, CapSize, Loc); -} - -void MCStreamer::EmitCheriCapabilityImpl(const MCSymbol *Value, - const MCExpr *Addend, unsigned CapSize, - SMLoc Loc) { - report_fatal_error("EmitCheriCapability is not implemented for this target!"); + report_fatal_error("emitCheriCapability is not implemented for this target!"); } -void MCStreamer::EmitCheriCapability(const MCSymbol *Value, int64_t Addend, - unsigned CapSize, SMLoc Loc) { - EmitCheriCapability(Value, MCConstantExpr::create(Addend, Context), CapSize, - Loc); +void MCStreamer::emitSymbolCheriCapability(const MCSymbol *Sym, + unsigned CapSize) { + const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext()); + emitCheriCapability(Expr, CapSize); } void MCStreamer::emitCheriIntcap(int64_t Value, unsigned CapSize, SMLoc Loc) { diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp index 0b7afa9aa38f0..636fb7236b00d 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp @@ -379,8 +379,7 @@ unsigned MipsELFObjectWriter::getRelocType(const MCFixup &Fixup, const auto ElfSym = cast(Target.getAddSym()); // Assert that we don't create .chericap relocations against temporary // symbols since those will result in wrong relocations (against sec+offset) - if (ElfSym->isDefined() && !ElfSym->getSize() && - !ElfSym->getName().starts_with(".Llpad")) { + if (ElfSym->isDefined() && !ElfSym->getSize()) { getContext().reportWarning(Fixup.getLoc(), "creating a R_MIPS_CHERI_CAPABILITY relocation against an unsized " "defined symbol: " + ElfSym->getName() + diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp index 095cce365f863..a235e68b4dc51 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp @@ -103,22 +103,17 @@ void 
MipsELFStreamer::emitValueImpl(const MCExpr *Value, unsigned Size, Labels.clear(); } -void MipsELFStreamer::EmitCheriCapabilityImpl(const MCSymbol *Symbol, - const MCExpr *Addend, - unsigned CapSize, SMLoc Loc) { - assert(Addend && "Should have received a MCConstExpr(0) instead of nullptr"); - visitUsedSymbol(*Symbol); - MCContext &Context = getContext(); - const MCSymbolRefExpr *SRE = MCSymbolRefExpr::create(Symbol, 0, Context, Loc); - const MCBinaryExpr *CapExpr = MCBinaryExpr::createAdd(SRE, Addend, Context, Loc); +void MipsELFStreamer::emitCheriCapability(const MCExpr *Value, unsigned CapSize, + SMLoc Loc) { + visitUsedExpr(*Value); // Pad to ensure that the capability is aligned emitValueToAlignment(Align(CapSize), 0, 1, 0); MCDataFragment *DF = new MCDataFragment(); insert(DF); - MCFixup cheriFixup = MCFixup::create( - 0, CapExpr, MCFixupKind(Mips::fixup_CHERI_CAPABILITY)); + MCFixup cheriFixup = + MCFixup::create(0, Value, MCFixupKind(Mips::fixup_CHERI_CAPABILITY)); DF->addFixup(cheriFixup); DF->appendContents(CapSize, '\xca'); } diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h index 916d1a30e035e..5f9e1adaa28ec 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h @@ -74,8 +74,8 @@ class MipsELFStreamer : public MCELFStreamer { void createPendingLabelRelocs(); protected: - void EmitCheriCapabilityImpl(const MCSymbol *Symbol, const MCExpr *Addend, - unsigned CapSize, SMLoc Loc) override; + void emitCheriCapability(const MCExpr *Value, unsigned CapSize, + SMLoc Loc) override; }; MCELFStreamer *createMipsELFStreamer(MCContext &Context, diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp index 71f8f2dac3dd7..2f51d92df671e 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp @@ -217,15 +217,9 @@ void RISCVELFStreamer::emitCheriIntcap(const MCExpr *Expr, unsigned CapSize, emitCheriIntcapGeneric(Expr, CapSize, Loc); } -void RISCVELFStreamer::EmitCheriCapabilityImpl(const MCSymbol *Symbol, - const MCExpr *Addend, - unsigned CapSize, SMLoc Loc) { - assert(Addend && "Should have received a MCConstExpr(0) instead of nullptr"); - visitUsedSymbol(*Symbol); - MCContext &Context = getContext(); - - const MCSymbolRefExpr *SRE = MCSymbolRefExpr::create(Symbol, 0, Context, Loc); - const MCBinaryExpr *CapExpr = MCBinaryExpr::createAdd(SRE, Addend, Context); +void RISCVELFStreamer::emitCheriCapability(const MCExpr *Value, + unsigned CapSize, SMLoc Loc) { + visitUsedExpr(*Value); // Pad to ensure that the capability is aligned emitValueToAlignment(Align(CapSize), 0, 1, 0); @@ -233,7 +227,7 @@ void RISCVELFStreamer::EmitCheriCapabilityImpl(const MCSymbol *Symbol, MCDataFragment *DF = new MCDataFragment(); insert(DF); MCFixup CapFixup = - MCFixup::create(0, CapExpr, MCFixupKind(RISCV::fixup_riscv_capability)); + MCFixup::create(0, Value, MCFixupKind(RISCV::fixup_riscv_capability)); DF->addFixup(CapFixup); DF->appendContents(CapSize, '\xca'); } diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h index f0f3f5a091a96..6fde4dc3a9ba8 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h @@ -43,8 +43,8 @@ class RISCVELFStreamer : public MCELFStreamer { SMLoc Loc) override; 
protected: - void EmitCheriCapabilityImpl(const MCSymbol *Symbol, const MCExpr *Addend, - unsigned CapSize, SMLoc Loc) override; + void emitCheriCapability(const MCExpr *Value, unsigned CapSize, + SMLoc Loc) override; }; class RISCVTargetELFStreamer : public RISCVTargetStreamer { diff --git a/llvm/lib/Target/RISCV/RISCVTargetObjectFile.cpp b/llvm/lib/Target/RISCV/RISCVTargetObjectFile.cpp index 365b074d2902b..0959ed0b82500 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetObjectFile.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetObjectFile.cpp @@ -7,9 +7,9 @@ //===----------------------------------------------------------------------===// #include "RISCVTargetObjectFile.h" +#include "MCTargetDesc/RISCVCompressedCap.h" #include "MCTargetDesc/RISCVMCObjectFileInfo.h" #include "RISCVTargetMachine.h" -#include "MCTargetDesc/RISCVCompressedCap.h" #include "llvm/BinaryFormat/ELF.h" #include "llvm/IR/Module.h" #include "llvm/MC/MCAsmInfo.h" diff --git a/llvm/lib/Transforms/Utils/CMakeLists.txt b/llvm/lib/Transforms/Utils/CMakeLists.txt index 3aaa778111972..5c4d3da4fb92d 100644 --- a/llvm/lib/Transforms/Utils/CMakeLists.txt +++ b/llvm/lib/Transforms/Utils/CMakeLists.txt @@ -98,7 +98,6 @@ add_llvm_component_library(LLVMTransformUtils ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms/Utils DEPENDS - vt_gen intrinsics_gen LINK_COMPONENTS diff --git a/llvm/test/CodeGen/AArch64/cpa-selectiondag.ll b/llvm/test/CodeGen/AArch64/cpa-selectiondag.ll index 028accf7d3220..50b0bf2a2cb0f 100644 --- a/llvm/test/CodeGen/AArch64/cpa-selectiondag.ll +++ b/llvm/test/CodeGen/AArch64/cpa-selectiondag.ll @@ -683,11 +683,10 @@ define hidden void @multidim() { ; CHECK-CPA-O3-NEXT: add x10, x10, :lo12:a ; CHECK-CPA-O3-NEXT: ldrh w8, [x8, :lo12:b] ; CHECK-CPA-O3-NEXT: lsl x9, x8, #1 -; CHECK-CPA-O3-NEXT: add x8, x8, #1 ; CHECK-CPA-O3-NEXT: add x9, x9, #2 ; CHECK-CPA-O3-NEXT: addpt x9, x10, x9 ; CHECK-CPA-O3-NEXT: addpt x8, x9, x8 -; CHECK-CPA-O3-NEXT: ldrb w8, [x8] +; CHECK-CPA-O3-NEXT: ldrb w8, [x8, #1] ; CHECK-CPA-O3-NEXT: cbz w8, .LBB14_2 ; CHECK-CPA-O3-NEXT: // %bb.1: ; CHECK-CPA-O3-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/ptradd-immediate.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/ptradd-immediate.ll new file mode 100644 index 0000000000000..6f59ea4861c78 --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/ptradd-immediate.ll @@ -0,0 +1,59 @@ +; RUN: llc @PURECAP_HARDFLOAT_ARGS@ < %s | FileCheck %s --check-prefix=PURECAP +;; Hybrid baseline to compare against +; RUN: sed 's/addrspace(200)//g' %s | llc @HYBRID_HARDFLOAT_ARGS@ | FileCheck %s --check-prefix=HYBRID + +;; If both offsets are known to be non-negative it is safe to commute them and +;; use an immediate load. +define i32 @nneg_nneg(ptr addrspace(200) %p, i16 %x) { + %x.ext = zext i16 %x to i64 + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x.ext + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If both offsets are known to be negative it is safe to commute them and use +;; an immediate load. +define i32 @neg_neg(ptr addrspace(200) %p, i16 %x) { + %x.ext = zext i16 %x to i64 + %x.pos = or i64 %x.ext, 1 + %x.neg = sub i64 0, %x.pos + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x.neg + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If one offset is known to be non-negative and the other negative it is not in +;; general safe to commute them and use an immediate load. 
+define i32 @nneg_neg(ptr addrspace(200) %p, i16 %x) { + %x.ext = zext i16 %x to i64 + %x.pos = or i64 %x.ext, 1 + %x.neg = sub i64 0, %x.pos + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x.neg + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If one offset is known to be non-negative and the other negative it is not in +;; general safe to commute them and use an immediate load. +define i32 @neg_nneg(ptr addrspace(200) %p, i16 %x) { + %x.ext = zext i16 %x to i64 + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x.ext + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If we do not know the sign of one offset it is not in general safe to +;; commute them and use an immediate load. +define i32 @nneg_unknown(ptr addrspace(200) %p, i64 %x) { + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If we do not know the sign of one offset it is not in general safe to +;; commute them and use an immediate load. +define i32 @neg_unknown(ptr addrspace(200) %p, i64 %x) { + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/landingpad-non-preemptible.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/landingpad-non-preemptible.ll index b4850ccc76bf6..8beb262b291b1 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/landingpad-non-preemptible.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/landingpad-non-preemptible.ll @@ -19,52 +19,6 @@ @_ZTIi = external dso_local addrspace(200) constant ptr addrspace(200) define dso_local noundef signext i32 @_Z8do_catchv() local_unnamed_addr addrspace(200) #0 personality ptr addrspace(200) @__gxx_personality_v0 { -; CHECK-LABEL: _Z8do_catchv: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: cincoffset $c11, $c11, -48 -; CHECK-NEXT: .cfi_def_cfa_offset 48 -; CHECK-NEXT: csd $16, $zero, 40($c11) # 8-byte Folded Spill -; CHECK-NEXT: csc $c18, $zero, 16($c11) # 16-byte Folded Spill -; CHECK-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill -; CHECK-NEXT: .cfi_offset 16, -8 -; CHECK-NEXT: .cfi_offset 90, -32 -; CHECK-NEXT: .cfi_offset 89, -48 -; CHECK-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) -; CHECK-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) -; CHECK-NEXT: cgetpccincoffset $c18, $1 -; CHECK-NEXT: .Ltmp0: -; CHECK-NEXT: clcbi $c12, %capcall20(_Z3foov)($c18) -; CHECK-NEXT: cjalr $c12, $c17 -; CHECK-NEXT: nop -; CHECK-NEXT: .Ltmp1: -; CHECK-NEXT: .LBB0_1: # %return -; CHECK-NEXT: sll $2, $2, 0 -; CHECK-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload -; CHECK-NEXT: clc $c18, $zero, 16($c11) # 16-byte Folded Reload -; CHECK-NEXT: cld $16, $zero, 40($c11) # 8-byte Folded Reload -; CHECK-NEXT: cjr $c17 -; CHECK-NEXT: cincoffset $c11, $c11, 48 -; CHECK-NEXT: .LBB0_2: # %lpad -; CHECK-NEXT: .Llpad0: -; CHECK-NEXT: cmove $c3, $c16 -; CHECK-NEXT: clcbi $c12, %capcall20(__cxa_begin_catch)($c18) -; CHECK-NEXT: cjalr $c12, $c17 -; CHECK-NEXT: sll $16, $5, 0 -; CHECK-NEXT: addiu $1, $zero, 2 -; CHECK-NEXT: bne $16, $1, .LBB0_4 -; CHECK-NEXT: nop -; CHECK-NEXT: # %bb.3: # %catch1 -; CHECK-NEXT: clcbi $c12, %capcall20(__cxa_end_catch)($c18) -; CHECK-NEXT: cjalr $c12, $c17 -; CHECK-NEXT: nop -; CHECK-NEXT: b .LBB0_1 -; CHECK-NEXT: addiu $2, $zero, 1 -; CHECK-NEXT: .LBB0_4: # %catch -; CHECK-NEXT: clcbi $c12, %capcall20(__cxa_end_catch)($c18) -; CHECK-NEXT: cjalr $c12, $c17 -; CHECK-NEXT: nop -; 
CHECK-NEXT: b .LBB0_1 -; CHECK-NEXT: addiu $2, $zero, 2 entry: %call = invoke noundef signext i32 @_Z3foov() to label %return unwind label %lpad @@ -122,7 +76,7 @@ declare dso_local void @__cxa_end_catch() local_unnamed_addr addrspace(200) ; CHECK-NEXT: [[CS_DIRECTIVE]] .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1 ; Note: RISC-V uses DW_EH_PE_udata4, so the 0xc marker uses 4 bytes instead of 1 ; CHECK-NEXT: [[SMALL_CS_DIRECTIVE:(\.byte)|(\.word)]] 12 # (landing pad is a capability) -; CHECK-NEXT: .chericap .Llpad0 # jumps to .Llpad0 +; CHECK-NEXT: .chericap .L_Z8do_catchv$local+(.Ltmp2-.Lfunc_begin0) # jumps to .Ltmp2 ; CHECK-NEXT: .byte 3 # On action: 2 ; CHECK-NEXT: [[CS_DIRECTIVE]] .Ltmp1-.Lfunc_begin0 # >> Call Site 2 << ; CHECK-NEXT: [[CS_DIRECTIVE]] .Lfunc_end0-.Ltmp1 # Call between .Ltmp1 and .Lfunc_end0 @@ -149,7 +103,7 @@ declare dso_local void @__cxa_end_catch() local_unnamed_addr addrspace(200) ; RELOCS-LABEL: Relocations [ ; RELOCS-LABEL: Section ({{.+}}) .rela.gcc_except_table { -; RELOCS-NEXT: 0x10 R_MIPS_CHERI_CAPABILITY/R_MIPS_NONE/R_MIPS_NONE .Llpad0 0x0{{$}} +; RELOCS-NEXT: 0x10 R_MIPS_CHERI_CAPABILITY/R_MIPS_NONE/R_MIPS_NONE .L_Z8do_catchv$local 0x40 ; RELOCS-NEXT: R_MIPS_PC32/R_MIPS_NONE/R_MIPS_NONE .L_ZTIi.DW.stub 0x0 ; RELOCS-NEXT: } @@ -172,3 +126,5 @@ declare dso_local void @__cxa_end_catch() local_unnamed_addr addrspace(200) ; RELOCS-NEXT: Other: 0 ; RELOCS-NEXT: Section: .text (0x2) ; RELOCS-NEXT: } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/ptradd-immediate.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/ptradd-immediate.ll new file mode 100644 index 0000000000000..8bf64c722a8fb --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/ptradd-immediate.ll @@ -0,0 +1,167 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/ptradd-immediate.ll +; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap < %s | FileCheck %s --check-prefix=PURECAP +;; Hybrid baseline to compare against +; RUN: sed 's/addrspace(200)//g' %s | llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 | FileCheck %s --check-prefix=HYBRID + +;; If both offsets are known to be non-negative it is safe to commute them and +;; use an immediate load. +define i32 @nneg_nneg(ptr addrspace(200) %p, i16 %x) { +; PURECAP-LABEL: nneg_nneg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: andi $1, $4, 65535 +; PURECAP-NEXT: dsll $1, $1, 2 +; PURECAP-NEXT: clw $2, $1, 4($c3) +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: nneg_nneg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: andi $1, $5, 65535 +; HYBRID-NEXT: dsll $1, $1, 2 +; HYBRID-NEXT: daddu $1, $4, $1 +; HYBRID-NEXT: lw $2, 4($1) +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %x.ext = zext i16 %x to i64 + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x.ext + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If both offsets are known to be negative it is safe to commute them and use +;; an immediate load. 
+define i32 @neg_neg(ptr addrspace(200) %p, i16 %x) { +; PURECAP-LABEL: neg_neg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: daddiu $1, $zero, -1 +; PURECAP-NEXT: xor $1, $4, $1 +; PURECAP-NEXT: daddiu $2, $zero, 1 +; PURECAP-NEXT: dsll $3, $2, 46 +; PURECAP-NEXT: daddiu $3, $3, -1 +; PURECAP-NEXT: dsll $3, $3, 16 +; PURECAP-NEXT: or $1, $1, $3 +; PURECAP-NEXT: dsll $2, $2, 62 +; PURECAP-NEXT: daddiu $2, $2, -2 +; PURECAP-NEXT: and $1, $1, $2 +; PURECAP-NEXT: dsll $1, $1, 2 +; PURECAP-NEXT: clw $2, $1, 0($c3) +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: neg_neg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: andi $1, $5, 65534 +; HYBRID-NEXT: dsll $1, $1, 2 +; HYBRID-NEXT: ori $1, $1, 4 +; HYBRID-NEXT: dsubu $1, $4, $1 +; HYBRID-NEXT: lw $2, -4($1) +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %x.ext = zext i16 %x to i64 + %x.pos = or i64 %x.ext, 1 + %x.neg = sub i64 0, %x.pos + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x.neg + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If one offset is known to be non-negative and the other negative it is not in +;; general safe to commute them and use an immediate load. +define i32 @nneg_neg(ptr addrspace(200) %p, i16 %x) { +; PURECAP-LABEL: nneg_neg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: andi $1, $4, 65534 +; PURECAP-NEXT: dsll $1, $1, 2 +; PURECAP-NEXT: ori $1, $1, 4 +; PURECAP-NEXT: daddiu $2, $zero, 4 +; PURECAP-NEXT: dsubu $1, $2, $1 +; PURECAP-NEXT: clw $2, $1, 0($c3) +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: nneg_neg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: andi $1, $5, 65534 +; HYBRID-NEXT: dsll $1, $1, 2 +; HYBRID-NEXT: ori $1, $1, 4 +; HYBRID-NEXT: dsubu $1, $4, $1 +; HYBRID-NEXT: lw $2, 4($1) +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %x.ext = zext i16 %x to i64 + %x.pos = or i64 %x.ext, 1 + %x.neg = sub i64 0, %x.pos + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x.neg + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If one offset is known to be non-negative and the other negative it is not in +;; general safe to commute them and use an immediate load. +define i32 @neg_nneg(ptr addrspace(200) %p, i16 %x) { +; PURECAP-LABEL: neg_nneg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: andi $1, $4, 65535 +; PURECAP-NEXT: dsll $1, $1, 2 +; PURECAP-NEXT: clw $2, $1, -4($c3) +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: neg_nneg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: andi $1, $5, 65535 +; HYBRID-NEXT: dsll $1, $1, 2 +; HYBRID-NEXT: daddu $1, $4, $1 +; HYBRID-NEXT: lw $2, -4($1) +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %x.ext = zext i16 %x to i64 + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x.ext + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If we do not know the sign of one offset it is not in general safe to +;; commute them and use an immediate load. 
+define i32 @nneg_unknown(ptr addrspace(200) %p, i64 %x) { +; PURECAP-LABEL: nneg_unknown: +; PURECAP: # %bb.0: +; PURECAP-NEXT: dsll $1, $4, 2 +; PURECAP-NEXT: clw $2, $1, 4($c3) +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: nneg_unknown: +; HYBRID: # %bb.0: +; HYBRID-NEXT: dsll $1, $5, 2 +; HYBRID-NEXT: daddu $1, $4, $1 +; HYBRID-NEXT: lw $2, 4($1) +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If we do not know the sign of one offset it is not in general safe to +;; commute them and use an immediate load. +define i32 @neg_unknown(ptr addrspace(200) %p, i64 %x) { +; PURECAP-LABEL: neg_unknown: +; PURECAP: # %bb.0: +; PURECAP-NEXT: dsll $1, $4, 2 +; PURECAP-NEXT: clw $2, $1, -4($c3) +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: neg_unknown: +; HYBRID: # %bb.0: +; HYBRID-NEXT: dsll $1, $5, 2 +; HYBRID-NEXT: daddu $1, $4, $1 +; HYBRID-NEXT: lw $2, -4($1) +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/landingpad-non-preemptible.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/landingpad-non-preemptible.ll index afcf07abbc15c..d2c4eb73ad501 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/landingpad-non-preemptible.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/landingpad-non-preemptible.ll @@ -41,7 +41,7 @@ define dso_local noundef signext i32 @_Z8do_catchv() local_unnamed_addr addrspac ; CHECK-NEXT: cret ; CHECK-NEXT: .LBB0_2: # %lpad ; CHECK-NEXT: .cfi_restore_state -; CHECK-NEXT: .Llpad0: +; CHECK-NEXT: .Ltmp2: ; CHECK-NEXT: mv s0, a1 ; CHECK-NEXT: ccall __cxa_begin_catch ; CHECK-NEXT: li a0, 2 @@ -111,7 +111,7 @@ declare dso_local void @__cxa_end_catch() local_unnamed_addr addrspace(200) ; CHECK-NEXT: [[CS_DIRECTIVE]] .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1 ; Note: RISC-V uses DW_EH_PE_udata4, so the 0xc marker uses 4 bytes instead of 1 ; CHECK-NEXT: [[SMALL_CS_DIRECTIVE:(\.byte)|(\.word)]] 12 # (landing pad is a capability) -; CHECK-NEXT: .chericap .Llpad0 # jumps to .Llpad0 +; CHECK-NEXT: .chericap .L_Z8do_catchv$local+(.Ltmp2-.Lfunc_begin0) # jumps to .Ltmp2 ; CHECK-NEXT: .byte 3 # On action: 2 ; CHECK-NEXT: [[CS_DIRECTIVE]] .Ltmp1-.Lfunc_begin0 # >> Call Site 2 << ; CHECK-NEXT: [[CS_DIRECTIVE]] .Lfunc_end0-.Ltmp1 # Call between .Ltmp1 and .Lfunc_end0 @@ -138,7 +138,7 @@ declare dso_local void @__cxa_end_catch() local_unnamed_addr addrspace(200) ; RELOCS-LABEL: Relocations [ ; RELOCS-LABEL: Section ({{.+}}) .rela.gcc_except_table { -; RELOCS-NEXT: R_RISCV_CHERI_CAPABILITY .Llpad0 0x0{{$}} +; RELOCS-NEXT: R_RISCV_CHERI_CAPABILITY .L_Z8do_catchv$local 0x24 ; RELOCS-NEXT: R_RISCV_ADD32 .L_ZTIi.DW.stub 0x0 ; RELOCS-NEXT: R_RISCV_SUB32 .L0 0x0 ; RELOCS-NEXT: } diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/ptradd-immediate.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/ptradd-immediate.ll new file mode 100644 index 0000000000000..165b550bba6dc --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/ptradd-immediate.ll @@ -0,0 +1,159 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/ptradd-immediate.ll +; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f 
-mattr=+xcheri,+xcheripurecap,+f < %s | FileCheck %s --check-prefix=PURECAP +;; Hybrid baseline to compare against +; RUN: sed 's/addrspace(200)//g' %s | llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f | FileCheck %s --check-prefix=HYBRID + +;; If both offsets are known to be non-negative it is safe to commute them and +;; use an immediate load. +define i32 @nneg_nneg(ptr addrspace(200) %p, i16 %x) { +; PURECAP-LABEL: nneg_nneg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: slli a1, a1, 16 +; PURECAP-NEXT: srli a1, a1, 14 +; PURECAP-NEXT: addi a1, a1, 4 +; PURECAP-NEXT: cincoffset a0, a0, a1 +; PURECAP-NEXT: clw a0, 0(a0) +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: nneg_nneg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: slli a1, a1, 16 +; HYBRID-NEXT: srli a1, a1, 14 +; HYBRID-NEXT: add a0, a0, a1 +; HYBRID-NEXT: lw a0, 4(a0) +; HYBRID-NEXT: ret + %x.ext = zext i16 %x to i64 + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x.ext + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If both offsets are known to be negative it is safe to commute them and use +;; an immediate load. +define i32 @neg_neg(ptr addrspace(200) %p, i16 %x) { +; PURECAP-LABEL: neg_neg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: not a1, a1 +; PURECAP-NEXT: andi a1, a1, -2 +; PURECAP-NEXT: slli a1, a1, 2 +; PURECAP-NEXT: lui a2, 1048512 +; PURECAP-NEXT: or a1, a1, a2 +; PURECAP-NEXT: cincoffset a0, a0, a1 +; PURECAP-NEXT: clw a0, 0(a0) +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: neg_neg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: ori a1, a1, 1 +; HYBRID-NEXT: slli a1, a1, 16 +; HYBRID-NEXT: srli a1, a1, 14 +; HYBRID-NEXT: sub a0, a0, a1 +; HYBRID-NEXT: lw a0, -4(a0) +; HYBRID-NEXT: ret + %x.ext = zext i16 %x to i64 + %x.pos = or i64 %x.ext, 1 + %x.neg = sub i64 0, %x.pos + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x.neg + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If one offset is known to be non-negative and the other negative it is not in +;; general safe to commute them and use an immediate load. +define i32 @nneg_neg(ptr addrspace(200) %p, i16 %x) { +; PURECAP-LABEL: nneg_neg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: ori a1, a1, 1 +; PURECAP-NEXT: slli a1, a1, 16 +; PURECAP-NEXT: srli a1, a1, 14 +; PURECAP-NEXT: li a2, 4 +; PURECAP-NEXT: sub a2, a2, a1 +; PURECAP-NEXT: cincoffset a0, a0, a2 +; PURECAP-NEXT: clw a0, 0(a0) +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: nneg_neg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: ori a1, a1, 1 +; HYBRID-NEXT: slli a1, a1, 16 +; HYBRID-NEXT: srli a1, a1, 14 +; HYBRID-NEXT: sub a0, a0, a1 +; HYBRID-NEXT: lw a0, 4(a0) +; HYBRID-NEXT: ret + %x.ext = zext i16 %x to i64 + %x.pos = or i64 %x.ext, 1 + %x.neg = sub i64 0, %x.pos + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x.neg + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If one offset is known to be non-negative and the other negative it is not in +;; general safe to commute them and use an immediate load. 
+define i32 @neg_nneg(ptr addrspace(200) %p, i16 %x) { +; PURECAP-LABEL: neg_nneg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: slli a1, a1, 16 +; PURECAP-NEXT: srli a1, a1, 14 +; PURECAP-NEXT: addi a1, a1, -4 +; PURECAP-NEXT: cincoffset a0, a0, a1 +; PURECAP-NEXT: clw a0, 0(a0) +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: neg_nneg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: slli a1, a1, 16 +; HYBRID-NEXT: srli a1, a1, 14 +; HYBRID-NEXT: add a0, a0, a1 +; HYBRID-NEXT: lw a0, -4(a0) +; HYBRID-NEXT: ret + %x.ext = zext i16 %x to i64 + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x.ext + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If we do not know the sign of one offset it is not in general safe to +;; commute them and use an immediate load. +define i32 @nneg_unknown(ptr addrspace(200) %p, i64 %x) { +; PURECAP-LABEL: nneg_unknown: +; PURECAP: # %bb.0: +; PURECAP-NEXT: slli a1, a1, 2 +; PURECAP-NEXT: addi a1, a1, 4 +; PURECAP-NEXT: cincoffset a0, a0, a1 +; PURECAP-NEXT: clw a0, 0(a0) +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: nneg_unknown: +; HYBRID: # %bb.0: +; HYBRID-NEXT: slli a1, a1, 2 +; HYBRID-NEXT: add a0, a0, a1 +; HYBRID-NEXT: lw a0, 4(a0) +; HYBRID-NEXT: ret + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} + +;; If we do not know the sign of one offset it is not in general safe to +;; commute them and use an immediate load. +define i32 @neg_unknown(ptr addrspace(200) %p, i64 %x) { +; PURECAP-LABEL: neg_unknown: +; PURECAP: # %bb.0: +; PURECAP-NEXT: slli a1, a1, 2 +; PURECAP-NEXT: addi a1, a1, -4 +; PURECAP-NEXT: cincoffset a0, a0, a1 +; PURECAP-NEXT: clw a0, 0(a0) +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: neg_unknown: +; HYBRID: # %bb.0: +; HYBRID-NEXT: slli a1, a1, 2 +; HYBRID-NEXT: add a0, a0, a1 +; HYBRID-NEXT: lw a0, -4(a0) +; HYBRID-NEXT: ret + %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x + %ret = load i32, ptr addrspace(200) %q + ret i32 %ret +} diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/landingpad-non-preemptible.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/landingpad-non-preemptible.ll index 248dd2035ca9d..37f8522c5a0ec 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/landingpad-non-preemptible.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/landingpad-non-preemptible.ll @@ -41,7 +41,7 @@ define dso_local noundef signext i32 @_Z8do_catchv() local_unnamed_addr addrspac ; CHECK-NEXT: cret ; CHECK-NEXT: .LBB0_2: # %lpad ; CHECK-NEXT: .cfi_restore_state -; CHECK-NEXT: .Llpad0: +; CHECK-NEXT: .Ltmp2: ; CHECK-NEXT: sext.w s0, a1 ; CHECK-NEXT: ccall __cxa_begin_catch ; CHECK-NEXT: li a0, 2 @@ -111,7 +111,7 @@ declare dso_local void @__cxa_end_catch() local_unnamed_addr addrspace(200) ; CHECK-NEXT: [[CS_DIRECTIVE]] .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1 ; Note: RISC-V uses DW_EH_PE_udata4, so the 0xc marker uses 4 bytes instead of 1 ; CHECK-NEXT: [[SMALL_CS_DIRECTIVE:(\.byte)|(\.word)]] 12 # (landing pad is a capability) -; CHECK-NEXT: .chericap .Llpad0 # jumps to .Llpad0 +; CHECK-NEXT: .chericap .L_Z8do_catchv$local+(.Ltmp2-.Lfunc_begin0) # jumps to .Ltmp2 ; CHECK-NEXT: .byte 3 # On action: 2 ; CHECK-NEXT: [[CS_DIRECTIVE]] .Ltmp1-.Lfunc_begin0 # >> Call Site 2 << ; CHECK-NEXT: [[CS_DIRECTIVE]] .Lfunc_end0-.Ltmp1 # Call between .Ltmp1 and .Lfunc_end0 @@ -138,7 +138,7 @@ declare dso_local void @__cxa_end_catch() local_unnamed_addr addrspace(200) ; RELOCS-LABEL: Relocations [ ; RELOCS-LABEL: Section ({{.+}}) .rela.gcc_except_table { -; 
+; RELOCS-NEXT: R_RISCV_CHERI_CAPABILITY .L_Z8do_catchv$local 0x24
 ; RELOCS-NEXT: R_RISCV_ADD32 .L_ZTIi.DW.stub 0x0
 ; RELOCS-NEXT: R_RISCV_SUB32 .L0 0x0
 ; RELOCS-NEXT: }
diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/ptradd-immediate.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/ptradd-immediate.ll
new file mode 100644
index 0000000000000..2144a77d680d3
--- /dev/null
+++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/ptradd-immediate.ll
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/ptradd-immediate.ll
+; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+xcheripurecap,+f,+d < %s | FileCheck %s --check-prefix=PURECAP
+;; Hybrid baseline to compare against
+; RUN: sed 's/addrspace(200)//g' %s | llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d | FileCheck %s --check-prefix=HYBRID
+
+;; If both offsets are known to be non-negative it is safe to commute them and
+;; use an immediate load.
+define i32 @nneg_nneg(ptr addrspace(200) %p, i16 %x) {
+; PURECAP-LABEL: nneg_nneg:
+; PURECAP: # %bb.0:
+; PURECAP-NEXT: slli a1, a1, 48
+; PURECAP-NEXT: srli a1, a1, 46
+; PURECAP-NEXT: addi a1, a1, 4
+; PURECAP-NEXT: cincoffset a0, a0, a1
+; PURECAP-NEXT: clw a0, 0(a0)
+; PURECAP-NEXT: cret
+;
+; HYBRID-LABEL: nneg_nneg:
+; HYBRID: # %bb.0:
+; HYBRID-NEXT: slli a1, a1, 48
+; HYBRID-NEXT: srli a1, a1, 46
+; HYBRID-NEXT: add a0, a0, a1
+; HYBRID-NEXT: lw a0, 4(a0)
+; HYBRID-NEXT: ret
+ %x.ext = zext i16 %x to i64
+ %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x.ext
+ %ret = load i32, ptr addrspace(200) %q
+ ret i32 %ret
+}
+
+;; If both offsets are known to be negative it is safe to commute them and use
+;; an immediate load.
+define i32 @neg_neg(ptr addrspace(200) %p, i16 %x) {
+; PURECAP-LABEL: neg_neg:
+; PURECAP: # %bb.0:
+; PURECAP-NEXT: not a1, a1
+; PURECAP-NEXT: lui a2, 1048560
+; PURECAP-NEXT: or a1, a1, a2
+; PURECAP-NEXT: andi a1, a1, -2
+; PURECAP-NEXT: slli a1, a1, 2
+; PURECAP-NEXT: cincoffset a0, a0, a1
+; PURECAP-NEXT: clw a0, 0(a0)
+; PURECAP-NEXT: cret
+;
+; HYBRID-LABEL: neg_neg:
+; HYBRID: # %bb.0:
+; HYBRID-NEXT: slli a1, a1, 48
+; HYBRID-NEXT: srli a1, a1, 46
+; HYBRID-NEXT: ori a1, a1, 4
+; HYBRID-NEXT: sub a0, a0, a1
+; HYBRID-NEXT: lw a0, -4(a0)
+; HYBRID-NEXT: ret
+ %x.ext = zext i16 %x to i64
+ %x.pos = or i64 %x.ext, 1
+ %x.neg = sub i64 0, %x.pos
+ %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x.neg
+ %ret = load i32, ptr addrspace(200) %q
+ ret i32 %ret
+}
+
+;; If one offset is known to be non-negative and the other negative it is not in
+;; general safe to commute them and use an immediate load.
+define i32 @nneg_neg(ptr addrspace(200) %p, i16 %x) {
+; PURECAP-LABEL: nneg_neg:
+; PURECAP: # %bb.0:
+; PURECAP-NEXT: slli a1, a1, 48
+; PURECAP-NEXT: srli a1, a1, 46
+; PURECAP-NEXT: ori a1, a1, 4
+; PURECAP-NEXT: li a2, 4
+; PURECAP-NEXT: sub a2, a2, a1
+; PURECAP-NEXT: cincoffset a0, a0, a2
+; PURECAP-NEXT: clw a0, 0(a0)
+; PURECAP-NEXT: cret
+;
+; HYBRID-LABEL: nneg_neg:
+; HYBRID: # %bb.0:
+; HYBRID-NEXT: slli a1, a1, 48
+; HYBRID-NEXT: srli a1, a1, 46
+; HYBRID-NEXT: ori a1, a1, 4
+; HYBRID-NEXT: sub a0, a0, a1
+; HYBRID-NEXT: lw a0, 4(a0)
+; HYBRID-NEXT: ret
+ %x.ext = zext i16 %x to i64
+ %x.pos = or i64 %x.ext, 1
+ %x.neg = sub i64 0, %x.pos
+ %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x.neg
+ %ret = load i32, ptr addrspace(200) %q
+ ret i32 %ret
+}
+
+;; If one offset is known to be non-negative and the other negative it is not in
+;; general safe to commute them and use an immediate load.
+define i32 @neg_nneg(ptr addrspace(200) %p, i16 %x) {
+; PURECAP-LABEL: neg_nneg:
+; PURECAP: # %bb.0:
+; PURECAP-NEXT: slli a1, a1, 48
+; PURECAP-NEXT: srli a1, a1, 46
+; PURECAP-NEXT: addi a1, a1, -4
+; PURECAP-NEXT: cincoffset a0, a0, a1
+; PURECAP-NEXT: clw a0, 0(a0)
+; PURECAP-NEXT: cret
+;
+; HYBRID-LABEL: neg_nneg:
+; HYBRID: # %bb.0:
+; HYBRID-NEXT: slli a1, a1, 48
+; HYBRID-NEXT: srli a1, a1, 46
+; HYBRID-NEXT: add a0, a0, a1
+; HYBRID-NEXT: lw a0, -4(a0)
+; HYBRID-NEXT: ret
+ %x.ext = zext i16 %x to i64
+ %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x.ext
+ %ret = load i32, ptr addrspace(200) %q
+ ret i32 %ret
+}
+
+;; If we do not know the sign of one offset it is not in general safe to
+;; commute them and use an immediate load.
+define i32 @nneg_unknown(ptr addrspace(200) %p, i64 %x) {
+; PURECAP-LABEL: nneg_unknown:
+; PURECAP: # %bb.0:
+; PURECAP-NEXT: slli a1, a1, 2
+; PURECAP-NEXT: addi a1, a1, 4
+; PURECAP-NEXT: cincoffset a0, a0, a1
+; PURECAP-NEXT: clw a0, 0(a0)
+; PURECAP-NEXT: cret
+;
+; HYBRID-LABEL: nneg_unknown:
+; HYBRID: # %bb.0:
+; HYBRID-NEXT: slli a1, a1, 2
+; HYBRID-NEXT: add a0, a0, a1
+; HYBRID-NEXT: lw a0, 4(a0)
+; HYBRID-NEXT: ret
+ %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 1, i64 %x
+ %ret = load i32, ptr addrspace(200) %q
+ ret i32 %ret
+}
+
+;; If we do not know the sign of one offset it is not in general safe to
+;; commute them and use an immediate load.
+define i32 @neg_unknown(ptr addrspace(200) %p, i64 %x) {
+; PURECAP-LABEL: neg_unknown:
+; PURECAP: # %bb.0:
+; PURECAP-NEXT: slli a1, a1, 2
+; PURECAP-NEXT: addi a1, a1, -4
+; PURECAP-NEXT: cincoffset a0, a0, a1
+; PURECAP-NEXT: clw a0, 0(a0)
+; PURECAP-NEXT: cret
+;
+; HYBRID-LABEL: neg_unknown:
+; HYBRID: # %bb.0:
+; HYBRID-NEXT: slli a1, a1, 2
+; HYBRID-NEXT: add a0, a0, a1
+; HYBRID-NEXT: lw a0, -4(a0)
+; HYBRID-NEXT: ret
+ %q = getelementptr [1 x i32], ptr addrspace(200) %p, i64 -1, i64 %x
+ %ret = load i32, ptr addrspace(200) %q
+ ret i32 %ret
+}
diff --git a/llvm/test/CodeGen/Mips/cheri/cheri-blockaddress.ll b/llvm/test/CodeGen/Mips/cheri/cheri-blockaddress.ll
index a9fe45979ac94..52f7d0f5035c3 100644
--- a/llvm/test/CodeGen/Mips/cheri/cheri-blockaddress.ll
+++ b/llvm/test/CodeGen/Mips/cheri/cheri-blockaddress.ll
@@ -62,7 +62,7 @@ indirectgoto: ; preds = %entry
 ; ASM: .end addrof_label_in_local
 ; ASM-LABEL: addrof_label_in_static.b:
-; ASM-NEXT: .chericap addrof_label_in_static + .Ltmp0-addrof_label_in_static
+; ASM-NEXT: .chericap addrof_label_in_static+(.Ltmp0-addrof_label_in_static)
 ; ASM-NEXT: .size addrof_label_in_static.b, [[#CAP_SIZE]]
 ; The .o file should contain a relocation against the function with a constant addend (0x1c)
diff --git a/llvm/test/CodeGen/RISCV/cheri/function-start-directives.ll b/llvm/test/CodeGen/RISCV/cheri/function-start-directives.ll
index 7bd3666a46632..1d1a20145f180 100644
--- a/llvm/test/CodeGen/RISCV/cheri/function-start-directives.ll
+++ b/llvm/test/CodeGen/RISCV/cheri/function-start-directives.ll
@@ -64,13 +64,12 @@ declare dso_local void @__cxa_end_catch() local_unnamed_addr addrspace(200)
 ; CHECK: .section .gcc_except_table
 ; CHECK: .word 12 # (landing pad is a capability)
-; CHECK-NEXT: .chericap .Llpad0 # jumps to .Llpad0
+; CHECK-NEXT: .chericap .L_Z4testv$local+(.Ltmp2-.Lfunc_begin0) # jumps to .Ltmp2
 ; OBJ-LABEL: Relocation section '.rela.gcc_except_table' at offset
 ; OBJ: Offset Info Type Symbol's Value Symbol's Name + Addend
-; OBJ: 0000000000000020 000000{{..}}000000c1 R_RISCV_CHERI_CAPABILITY 0000000000000024 .Llpad0 + 0{{$}}
+; OBJ: 0000000000000020 0000000{{.}}000000c1 R_RISCV_CHERI_CAPABILITY
 ; OBJ-LABEL: Symbol table '.symtab' contains
 ; OBJ: Value Size Type Bind Vis Ndx Name
-; OBJ: 0000000000000000 60 FUNC LOCAL DEFAULT 2 .L_Z4testv$local
 ; OBJ: 0000000000000000 60 FUNC GLOBAL DEFAULT 2 _Z4testv
diff --git a/llvm/test/CodeGen/RISCV/cheri/landing-pad.ll b/llvm/test/CodeGen/RISCV/cheri/landing-pad.ll
index 321837816bef5..34d3ea4580c5e 100644
--- a/llvm/test/CodeGen/RISCV/cheri/landing-pad.ll
+++ b/llvm/test/CodeGen/RISCV/cheri/landing-pad.ll
@@ -26,7 +26,7 @@ define void @test() addrspace(200) personality i8 addrspace(200)* bitcast (i32 (
 ; RV32IXCHERI-NEXT: .Ltmp1:
 ; RV32IXCHERI-NEXT: j .LBB0_2
 ; RV32IXCHERI-NEXT: .LBB0_1: # %lpad
-; RV32IXCHERI-NEXT: .Llpad0:
+; RV32IXCHERI-NEXT: .Ltmp2:
 ; RV32IXCHERI-NEXT: ccall __cxa_begin_catch
 ; RV32IXCHERI-NEXT: ccall __cxa_end_catch
 ; RV32IXCHERI-NEXT: j .LBB0_2
@@ -48,7 +48,7 @@ define void @test() addrspace(200) personality i8 addrspace(200)* bitcast (i32 (
 ; RV64IXCHERI-NEXT: .Ltmp1:
 ; RV64IXCHERI-NEXT: j .LBB0_2
 ; RV64IXCHERI-NEXT: .LBB0_1: # %lpad
-; RV64IXCHERI-NEXT: .Llpad0:
+; RV64IXCHERI-NEXT: .Ltmp2:
 ; RV64IXCHERI-NEXT: ccall __cxa_begin_catch
 ; RV64IXCHERI-NEXT: ccall __cxa_end_catch
 ; RV64IXCHERI-NEXT: j .LBB0_2
diff --git a/llvm/test/MC/Mips/cheri/chericap-size-not-known-yet.s b/llvm/test/MC/Mips/cheri/chericap-size-not-known-yet.s
index 29096fc8efab7..a0b37eb398f84 100644
--- a/llvm/test/MC/Mips/cheri/chericap-size-not-known-yet.s
+++ b/llvm/test/MC/Mips/cheri/chericap-size-not-known-yet.s
@@ -22,12 +22,12 @@ lmp_head_extern:
 .hidden .Llmp_head_local_unsized
 .Llmp_head_local_unsized:
 .chericap .Llmp_head_local_unsized+1
-# WARNING: [[@LINE-1]]:12: warning: creating a R_MIPS_CHERI_CAPABILITY relocation against an unsized defined symbol: .Llmp_head_local_unsized.
+# WARNING: [[@LINE-1]]:36: warning: creating a R_MIPS_CHERI_CAPABILITY relocation against an unsized defined symbol: .Llmp_head_local_unsized. This will probably result in incorrect values at run time.
 .section .otherdata, "aw", %progbits
 .Lreloc_text_local:
 .chericap .Lfoo_start + 4
-# WARNING: [[@LINE-1]]:11: warning: creating a R_MIPS_CHERI_CAPABILITY relocation against an unsized defined symbol: .Lfoo_start.
+# WARNING: [[@LINE-1]]:23: warning: creating a R_MIPS_CHERI_CAPABILITY relocation against an unsized defined symbol: .Lfoo_start. This will probably result in incorrect values at run time.
 .Lreloc_text_global:
 .chericap foo + 4
diff --git a/llvm/test/MC/RISCV/xqcili-relocations.s b/llvm/test/MC/RISCV/xqcili-relocations.s
index 1994013845056..81d104493a946 100644
--- a/llvm/test/MC/RISCV/xqcili-relocations.s
+++ b/llvm/test/MC/RISCV/xqcili-relocations.s
@@ -28,7 +28,7 @@ qc.li a0, %qc.abs20(undef)
 # ASM: qc.e.li s0, undef
 # OBJ-NEXT: qc.e.li s0, 0x0
 # OBJ-NEXT: R_RISCV_VENDOR QUALCOMM{{$}}
-# OBJ-NEXT: R_RISCV_CUSTOM194 undef{{$}}
+# OBJ-NEXT: R_RISCV_FUNC_RELATIVE undef{{$}}
 qc.e.li s0, undef
@@ -50,7 +50,7 @@ qc.li a2, %qc.abs20(same_section)
 # ASM: qc.e.li s2, same_section
 # OBJ-NEXT: qc.e.li s2, 0x0
 # OBJ-NEXT: R_RISCV_VENDOR QUALCOMM{{$}}
-# OBJ-NEXT: R_RISCV_CUSTOM194 same_section{{$}}
+# OBJ-NEXT: R_RISCV_FUNC_RELATIVE same_section{{$}}
 qc.e.li s2, same_section
 # ASM: qc.li a3, %qc.abs20(other_section)
@@ -62,7 +62,7 @@ qc.li a3, %qc.abs20(other_section)
 # ASM: qc.e.li s3, other_section
 # OBJ-NEXT: qc.e.li s3, 0x0
 # OBJ-NEXT: R_RISCV_VENDOR QUALCOMM{{$}}
-# OBJ-NEXT: R_RISCV_CUSTOM194 other_section{{$}}
+# OBJ-NEXT: R_RISCV_FUNC_RELATIVE other_section{{$}}
 qc.e.li s3, other_section
 # ASM-LABEL: same_section:
@@ -93,7 +93,7 @@ qc.li a1, %qc.abs20(undef)
 # ASM: qc.e.li s1, undef
 # OBJ-NEXT: qc.e.li s1, 0x0
 # OBJ-NEXT: R_RISCV_VENDOR QUALCOMM{{$}}
-# OBJ-NEXT: R_RISCV_CUSTOM194 undef{{$}}
+# OBJ-NEXT: R_RISCV_FUNC_RELATIVE undef{{$}}
 # OBJ-NEXT: R_RISCV_RELAX
 qc.e.li s1, undef
@@ -110,7 +110,7 @@ qc.li a1, %qc.abs20(undef)
 # ASM: qc.e.li a2, undef
 # OBJ-NEXT: qc.e.li a2, 0x0
 # OBJ-NEXT: R_RISCV_VENDOR QUALCOMM{{$}}
-# OBJ-NEXT: R_RISCV_CUSTOM194 undef{{$}}
+# OBJ-NEXT: R_RISCV_FUNC_RELATIVE undef{{$}}
 # OBJ-NEXT: R_RISCV_RELAX
 qc.e.li a2, undef
diff --git a/llvm/tools/llvm-objdump/llvm-objdump.cpp b/llvm/tools/llvm-objdump/llvm-objdump.cpp
index 789f456d684e8..23db46f291ea5 100644
--- a/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -2715,6 +2715,7 @@ printELFCapRelocations(const ELFObjectFile *Obj) {
   const uint64_t Function = UINT64_C(1) << ((sizeof(TargetUint) * 8) - 1);
   const uint64_t Constant = UINT64_C(1) << ((sizeof(TargetUint) * 8) - 2);
   const uint64_t Indirect = UINT64_C(1) << ((sizeof(TargetUint) * 8) - 3);
+  const uint64_t Code = UINT64_C(1) << ((sizeof(TargetUint) * 8) - 4);
   StringRef PermStr;
   switch (Perms) {
   case 0:
@@ -2729,6 +2730,9 @@ printELFCapRelocations(const ELFObjectFile *Obj) {
   case Function | Indirect:
     PermStr = " (GNU Indirect Function)";
     break;
+  case Function | Code:
+    PermStr = " (Code)";
+    break;
   default:
     PermStr = " (Unknown)";
" (Unknown)"; break; diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp index 213f5520e049c..1957efb93eb0c 100644 --- a/llvm/tools/llvm-readobj/ELFDumper.cpp +++ b/llvm/tools/llvm-readobj/ELFDumper.cpp @@ -3529,6 +3529,7 @@ template void ELFDumper::printCheriCapRelocs() { const uint64_t Function = UINT64_C(1) << ((sizeof(TargetUint) * 8) - 1); const uint64_t Constant = UINT64_C(1) << ((sizeof(TargetUint) * 8) - 2); const uint64_t Indirect = UINT64_C(1) << ((sizeof(TargetUint) * 8) - 3); + const uint64_t Code = UINT64_C(1) << ((sizeof(TargetUint) * 8) - 4); StringRef PermStr; switch (Perms) { case 0: @@ -3543,6 +3544,9 @@ template void ELFDumper::printCheriCapRelocs() { case Function | Indirect: PermStr = "GNU Indirect Function"; break; + case Function | Code: + PermStr = "Code"; + break; default: PermStr = "Unknown"; break;