diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 29a9329079335..30154a375870e 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -5111,14 +5111,6 @@ def mno_xcheri_rvc : Flag<["-"], "mno-xcheri-rvc">, Alias; def mno_xcheri_norvc : Flag<["-"], "mno-xcheri-norvc">, Group, HelpText<"Enable using compressed CHERI instructions">; def mxcheri_rvc : Flag<["-"], "mxcheri-rvc">, Alias; -// Temporary flags to enable/disable CHERI ISAv8 compatibility. -// Flag name is a bit odd but this is required by handleTargetFeaturesGroup(). -def mxcheri_v9_semantics : Flag<["-"], "mxcheri-v9-semantics">, Group, - HelpText<"Generate code that is no longer compatible with CHERI ISAv8">; -def mno_xcheri_v9_semantics : Flag<["-"], "mno-xcheri-v9-semantics">, Group, - HelpText<"Generate code that is compatible with CHERI ISAv8">; -// Add an alias with a more sensible name for when the default is flipped. -def mxcheri_v8_compat : Flag<["-"], "mxcheri-v8-compat">, Alias; def munaligned_access : Flag<["-"], "munaligned-access">, Group, HelpText<"Allow memory accesses to be unaligned (AArch32/MIPSr6 only)">; diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp index 078021af6f43a..a5050dc7940eb 100644 --- a/clang/lib/Basic/Targets/RISCV.cpp +++ b/clang/lib/Basic/Targets/RISCV.cpp @@ -317,11 +317,9 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts, // for the capability-mode JALR with immediate). Remove after the next // CHERI-LLVM "release". Builder.defineMacro("__riscv_xcheri_mode_dependent_jumps"); - // Temporary defines to allow software to detect a new ISAv9 compiler. - if (HasCheriISAv9Semantics) { - Builder.defineMacro("__riscv_xcheri_tag_clear"); - Builder.defineMacro("__riscv_xcheri_no_relocation"); - } + // Defines to allow software to detect an ISAv9 compiler vs. an older v8 one.
+ Builder.defineMacro("__riscv_xcheri_tag_clear"); + Builder.defineMacro("__riscv_xcheri_no_relocation"); } if (ISAInfo->hasExtension("zve32x")) @@ -463,8 +461,6 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector &Features, if (ISAInfo->hasExtension("xcheri")) { HasCheri = true; CapSize = XLen * 2; - HasCheriISAv9Semantics = - llvm::is_contained(Features, "+xcheri-v9-semantics"); } if (ABI.empty()) ABI = ISAInfo->computeDefaultABI().str(); diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h index 740abadf5faa1..4a3aa40f3167a 100644 --- a/clang/lib/Basic/Targets/RISCV.h +++ b/clang/lib/Basic/Targets/RISCV.h @@ -73,7 +73,6 @@ class RISCVTargetInfo : public TargetInfo { std::unique_ptr ISAInfo; int CapSize = -1; bool HasCheri = false; - bool HasCheriISAv9Semantics = false; bool IsABICHERIoT = false; bool IsABICHERIoTBareMetal = false; void setCapabilityABITypes() { diff --git a/clang/test/CodeGen/cheri/cheri-hybrid-ptr-to-cap.c b/clang/test/CodeGen/cheri/cheri-hybrid-ptr-to-cap.c index 4e22e1382b686..5c803b4bb916a 100644 --- a/clang/test/CodeGen/cheri/cheri-hybrid-ptr-to-cap.c +++ b/clang/test/CodeGen/cheri/cheri-hybrid-ptr-to-cap.c @@ -8,6 +8,7 @@ // Check the assembly output to see if we used PCC or DDC // RUN: %cheri_cc1 -o - -S %s | FileCheck %s --check-prefixes=ASM,ASM-MIPS +// RUN: %riscv64_cheri_cc1 -o - -S %s // RUN: %riscv64_cheri_cc1 -o - -S %s | FileCheck %s --check-prefixes=ASM,ASM-RISCV void external_fn(void); @@ -25,11 +26,11 @@ void *__capability global_fn_to_cap(void) { // ASM-MIPS: cgetpcc $c1 // ASM-MIPS-NEXT: ld $1, %got_disp(external_fn)($1) // ASM-MIPS-NEXT: cfromptr $c3, $c1, $1 - // ASM-RISCV: cspecialr ca0, pcc - // ASM-RISCV: auipc a1, %got_pcrel_hi(external_fn) - // ASM-RISCV-NEXT: ld a1, %pcrel_lo(.Lpcrel_hi0)(a1) - // ASM-RISCV-NEXT: cfromptr ca0, ca0, a1 - return (__cheri_tocap void *__capability) & external_fn; + // ASM-RISCV: cspecialr ca1, pcc + // ASM-RISCV: auipc a0, %got_pcrel_hi(external_fn) + // ASM-RISCV-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi0)(a0) + // ASM-RISCV-NEXT: csetaddr ca1, ca1, a0 + return (__cheri_tocap void *__capability)&external_fn; } // CHECK-LABEL: define {{[^@]+}}@global_data_to_cap @@ -45,10 +46,11 @@ void *__capability global_data_to_cap(void) { // ASM-MIPS-NEXT: csetbounds $c3, $c1, 4 // ASM-RISCV: auipc a0, %got_pcrel_hi(external_global) // ASM-RISCV-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi1)(a0) - // ASM-RISCV-NEXT: cfromptr ca0, ddc, a0 + // ASM-RISCV-NEXT: cspecialr ca1, ddc + // ASM-RISCV-NEXT: csetaddr ca1, ca1, a0 // We do not set bounds on RISCV // ASM-RISCV-NOT: csetbounds - return (__cheri_tocap void *__capability) & external_global; + return (__cheri_tocap void *__capability)&external_global; } // CHECK-LABEL: define {{[^@]+}}@fn_ptr_to_cap @@ -66,8 +68,8 @@ void *__capability fn_ptr_to_cap(void (*fn_ptr)(void)) { // ASM-LABEL: fn_ptr_to_cap: // ASM-MIPS: cgetpcc $c1 // ASM-MIPS-NEXT: cfromptr $c3, $c1, $1 - // ASM-RISCV: cspecialr ca0, pcc - // ASM-RISCV-NEXT: cfromptr ca0, ca0, a1 + // ASM-RISCV: cspecialr ca1, pcc + // ASM-RISCV-NEXT: csetaddr ca1, ca1, a0 return (__cheri_tocap void *__capability)fn_ptr; } @@ -86,7 +88,8 @@ void *__capability fn_ptr_to_cap(void (*fn_ptr)(void)) { void *__capability fn_ptr_to_cap_not_smart_enough(void (*fn_ptr)(void)) { // ASM-LABEL: fn_ptr_to_cap_not_smart_enough: // ASM-MIPS: cfromddc $c3, $1 - // ASM-RISCV: cfromptr ca0, ddc, a0 + // ASM-RISCV: cspecialr ca1, ddc + // ASM-RISCV-NEXT: csetaddr ca1, ca1, a0 // Note: In this case clang doesn't see that the result is 
actual a function // so it uses DDC: void *tmp = (void *)fn_ptr; @@ -106,6 +109,7 @@ void *__capability data_ptr_to_cap(int *data_ptr) { // Note: For data pointers we derive from DDC: // ASM-LABEL: data_ptr_to_cap: // ASM-MIPS: cfromddc $c3, $1 - // ASM-RISCV: cfromptr ca0, ddc, a0 + // ASM-RISCV: cspecialr ca1, ddc + // ASM-RISCV-NEXT: csetaddr ca1, ca1, a0 return (__cheri_tocap void *__capability)data_ptr; } diff --git a/clang/test/Driver/riscv-default-features.c b/clang/test/Driver/riscv-default-features.c index ec42837489852..b8bc90bd2d8e9 100644 --- a/clang/test/Driver/riscv-default-features.c +++ b/clang/test/Driver/riscv-default-features.c @@ -1,44 +1,33 @@ // RUN: %clang --target=riscv32-unknown-elf -S -emit-llvm %s -o - | FileCheck %s -check-prefix=RV32 // RUN: %clang --target=riscv64-unknown-elf -S -emit-llvm %s -o - | FileCheck %s -check-prefix=RV64 -// RUN: %clang --target=riscv32-unknown-elf -march=rv32ixcheri -S -emit-llvm %s -o - | FileCheck %s -check-prefix=RV32-XCHERI -// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -S -emit-llvm %s -o - | FileCheck %s -check-prefix=RV64-XCHERI -// RUN: %clang --target=riscv32-unknown-elf -march=rv32ixcheri -S -mno-xcheri-rvc -emit-llvm %s -o - | FileCheck %s -check-prefix=RV32-XCHERI-NORVC -// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -S -mno-xcheri-rvc -emit-llvm %s -o - | FileCheck %s -check-prefix=RV64-XCHERI-NORVC -// RUN: %clang --target=riscv32-unknown-elf -march=rv32ixcheri -S -mxcheri-norvc -emit-llvm %s -o - | FileCheck %s -check-prefix=RV32-XCHERI-NORVC -// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -S -mxcheri-norvc -emit-llvm %s -o - | FileCheck %s -check-prefix=RV64-XCHERI-NORVC +// RUN: %clang --target=riscv32-unknown-elf -march=rv32ixcheri -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=RV32-XCHERI,XCHERI +// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=RV64-XCHERI,XCHERI +// RUN: %clang --target=riscv32-unknown-elf -march=rv32ixcheri -S -mno-xcheri-rvc -emit-llvm %s -o - | FileCheck %s --check-prefixes=RV32-XCHERI-NORVC,XCHERI +// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -S -mno-xcheri-rvc -emit-llvm %s -o - | FileCheck %s --check-prefixes=RV64-XCHERI-NORVC,XCHERI +// RUN: %clang --target=riscv32-unknown-elf -march=rv32ixcheri -S -mxcheri-norvc -emit-llvm %s -o - | FileCheck %s --check-prefixes=RV32-XCHERI-NORVC,XCHERI +// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -S -mxcheri-norvc -emit-llvm %s -o - | FileCheck %s --check-prefixes=RV64-XCHERI-NORVC,XCHERI // The -mxcheri-rvc flag to explicitly disable xcheri-norvc: -// RUN: %clang --target=riscv32-unknown-elf -march=rv32ixcheri -S -mxcheri-rvc -emit-llvm %s -o - | FileCheck %s -check-prefix=RV32-XCHERI-EXPLICIT-RVC -// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -S -mxcheri-rvc -emit-llvm %s -o - | FileCheck %s -check-prefix=RV64-XCHERI-EXPLICIT-RVC -// RUN: %clang --target=riscv32-unknown-elf -march=rv32ixcheri -S -mno-xcheri-norvc -emit-llvm %s -o - | FileCheck %s -check-prefix=RV32-XCHERI-EXPLICIT-RVC -// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -S -mno-xcheri-norvc -emit-llvm %s -o - | FileCheck %s -check-prefix=RV64-XCHERI-EXPLICIT-RVC - -// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -mxcheri-v9-semantics -S -emit-llvm %s -o - | FileCheck %s -check-prefix=RV64-XCHERI-V9 -// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -mxcheri-v8-compat -S 
-emit-llvm %s -o - | FileCheck %s -check-prefix=RV64-XCHERI-V8 +// RUN: %clang --target=riscv32-unknown-elf -march=rv32ixcheri -S -mxcheri-rvc -emit-llvm %s -o - | FileCheck %s --check-prefixes=RV32-XCHERI,XCHERI,XCHERI-RVC +// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -S -mxcheri-rvc -emit-llvm %s -o - | FileCheck %s --check-prefixes=RV64-XCHERI,XCHERI,XCHERI-RVC +// RUN: %clang --target=riscv32-unknown-elf -march=rv32ixcheri -S -mno-xcheri-norvc -emit-llvm %s -o - | FileCheck %s --check-prefixes=RV32-XCHERI,XCHERI,XCHERI-RVC +// RUN: %clang --target=riscv64-unknown-elf -march=rv64ixcheri -S -mno-xcheri-norvc -emit-llvm %s -o - | FileCheck %s --check-prefixes=RV64-XCHERI,XCHERI,XCHERI-RVC // RV32: "target-features"="+32bit,+a,+c,+m,+relax, // RV64: "target-features"="+64bit,+a,+c,+m,+relax, -// RV32-XCHERI: "target-features"="+32bit,+relax,+xcheri -// RV64-XCHERI: "target-features"="+64bit,+relax,+xcheri +// RV32-XCHERI: "target-features"="+32bit,+relax,+xcheri, +// RV64-XCHERI: "target-features"="+64bit,+relax,+xcheri, // RV32-XCHERI-RVC: "target-features"="+32bit,+relax,+xcheri -// RV32-XCHERI-RVC: -save-restore // RV64-XCHERI-RVC: "target-features"="+64bit,+relax,+xcheri -// RV64-XCHERI-RVC: -save-restore // RV32-XCHERI-NORVC: "target-features"="+32bit,+relax,+xcheri,+xcheri-norvc // RV64-XCHERI-NORVC: "target-features"="+64bit,+relax,+xcheri,+xcheri-norvc // RV32-XCHERI-EXPLICIT-RVC: "target-features"="+32bit,+relax,+xcheri -// RV32-XCHERI-EXPLICIT-RVC-SAME -save-restore -// RV32-XCHERI-EXPLICIT-RVC-SAME -xcheri-norvc +// XCHERI-RVC-SAME: ,-xcheri-norvc, // RV64-XCHERI-EXPLICIT-RVC: "target-features"="+64bit,+relax,+xcheri -// RV64-XCHERI-EXPLICIT-RVC-SAME -save-restore -// RV64-XCHERI-EXPLICIT-RVC-SAME -xcheri-norvc -// RV64-XCHERI-V8: "target-features"="+64bit,+relax,+xcheri -// RV64-XCHERI-V8-SAME: -xcheri-v9-semantics -// RV64-XCHERI-V9: "target-features"="+64bit,+relax,+xcheri -// RV64-XCHERI-V9-SAME: +xcheri-v9-semantics +// XCHERI-NOT: xcheri, // Dummy function int foo(void){ diff --git a/clang/test/Preprocessor/cheri-riscv-feature-flags.c b/clang/test/Preprocessor/cheri-riscv-feature-flags.c index 8c4f01cd6fe52..886ede81e78f2 100644 --- a/clang/test/Preprocessor/cheri-riscv-feature-flags.c +++ b/clang/test/Preprocessor/cheri-riscv-feature-flags.c @@ -1,11 +1,6 @@ // RUN: %riscv64_cheri_cc1 -E -dM -ffreestanding < /dev/null \ -// RUN: | FileCheck %s --check-prefixes=CHECK,CHECK64 --implicit-check-not=cheri --implicit-check-not=CHERI -// RUN: %riscv32_cheri_cc1 -E -dM -ffreestanding < /dev/null \ -// RUN: | FileCheck %s --check-prefixes=CHECK,CHECK32 --implicit-check-not=cheri --implicit-check-not=CHERI -/// Check for the new flags for removed ISAv8 compatibility: -// RUN: %riscv64_cheri_cc1 -E -dM -ffreestanding -target-feature +xcheri-v9-semantics < /dev/null \ // RUN: | FileCheck %s --check-prefixes=CHECK,CHECK64,CHECK-V9ISA --implicit-check-not=cheri --implicit-check-not=CHERI -// RUN: %riscv32_cheri_cc1 -E -dM -ffreestanding -target-feature +xcheri-v9-semantics < /dev/null \ +// RUN: %riscv32_cheri_cc1 -E -dM -ffreestanding < /dev/null \ // RUN: | FileCheck %s --check-prefixes=CHECK,CHECK32,CHECK-V9ISA --implicit-check-not=cheri --implicit-check-not=CHERI // CHECK32: #define __CHERI_ADDRESS_BITS__ 32 diff --git a/lld/ELF/Arch/Cheri.cpp b/lld/ELF/Arch/Cheri.cpp index 0f4595cb18e71..db01de94592de 100644 --- a/lld/ELF/Arch/Cheri.cpp +++ b/lld/ELF/Arch/Cheri.cpp @@ -14,10 +14,6 @@ using namespace llvm; using namespace llvm::object; using namespace llvm::ELF; 
-// Change these to #define for extremely verbose debug output -#undef DEBUG_CAP_RELOCS -#undef DEBUG_CAP_TABLE - namespace lld { namespace elf { @@ -355,13 +351,6 @@ void CheriCapRelocsSection::addCapReloc(CheriCapRelocLocation loc, return; // Maybe happens with vtables? } if (targetNeedsDynReloc) { -#ifdef DEBUG_CAP_RELOCS - message("Adding dyn reloc at " + toString(this) + "+0x" + - utohexstr(CurrentEntryOffset) + " against " + - Target.verboseToString()); - message("Symbol preemptible:" + Twine(Target.Sym->IsPreemptible)); -#endif - bool relativeToLoadAddress = false; // The addend is not used as the offset into the capability here, as we // have the offset field in the __cap_relocs for that. The Addend @@ -468,12 +457,6 @@ static uint64_t getTargetSize(Ctx &ctx, const CheriCapRelocLocation &location, // past the section if (offsetInOS <= os->size) { targetSize = os->size - offsetInOS; -#ifdef DEBUG_CAP_RELOCS - if (ctx.arg.verboseCapRelocs) - errs() << " OS OFFSET 0x" << utohexstr(OS->Addr) << "SYM OFFSET 0x" - << utohexstr(OffsetInOS) << " SECLEN 0x" << utohexstr(OS->Size) - << " -> target size 0x" << utohexstr(TargetSize) << "\n"; -#endif UnknownSectionSize = false; } } @@ -715,17 +698,6 @@ void CheriCapTableSection::addEntry(Symbol &sym, RelExpr expr, // TODO: should we emit two relocations instead? if (!idx.usedInCallExpr) it.first->second.usedInCallExpr = false; } -#if defined(DEBUG_CAP_TABLE) - std::string DbgContext; - if (ctx.arg.CapTableScope == CapTableScopePolicy::File) { - DbgContext = " for file '" + toString(IS->File) + "'"; - } else if (ctx.arg.CapTableScope == CapTableScopePolicy::Function) { - DbgContext = - " for function '" + toString(*findMatchingFunction(IS, Offset)) + "'"; - } - llvm::errs() << "Added symbol " << toString(Sym) << " to .captable" - << DbgContext << ". Total count " << Entries.size() << "\n"; -#endif } void CheriCapTableSection::addDynTlsEntry(Symbol &sym) { @@ -755,11 +727,6 @@ uint32_t CheriCapTableSection::getIndex(const Symbol &sym, // start of the current captable subset (or the global table in the default // case). When using per-function tables the first index in every function // will always be zero. -#if defined(DEBUG_CAP_TABLE) - message("captable index for " + toString(Sym) + " is " + - Twine(*it->second.Index) + " - " + Twine(Entries.FirstIndex) + ": " + - Twine(*it->second.Index - Entries.FirstIndex)); -#endif return *it->second.index - entries.firstIndex; } diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td index 8a031316b6fad..5fdb01a548bbb 100644 --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -1356,20 +1356,6 @@ def RV64 : HwMode<"+64bit", [IsRV64]>; def IsRVE : Predicate<"Subtarget->isRVE()">, AssemblerPredicate<(all_of FeatureStdExtE)>; -// TODO: Once all supported CheriBSD branches are ready for the new semantics -// this feature should be enabled automatically. 
-def FeatureCheriISAV9Semantics - : SubtargetFeature<"xcheri-v9-semantics", "HasCheriISAv9Semantics", "true", - "CHERI ISAv9 semantics (tag-clearing, no relocation)">; -def HasCheriISAv9 - : Predicate<"Subtarget->hasCheriISAv9Semantics()">, - AssemblerPredicate<(all_of FeatureCheriISAV9Semantics), - "CHERI ISAv9 semantics (tag-clearing, no relocation)">; -def NotCheriISAv9 - : Predicate<"!Subtarget->hasCheriISAv9Semantics()">, - AssemblerPredicate<(all_of (not FeatureCheriISAV9Semantics)), - "CHERI ISAv8 semantics (trapping, DDC/PCC relocation)">; - def FeatureVendorXCheri : RISCVExtension<0, 0, "Implements CHERI extension">; def HasCheri : Predicate<"Subtarget->hasVendorXCheri()">, diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index f3533652b945e..f8792e859ccc3 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -672,8 +672,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(ISD::GlobalTLSAddress, CLenVT, Custom); setOperationAction(ISD::ADDRSPACECAST, CLenVT, Custom); setOperationAction(ISD::ADDRSPACECAST, XLenVT, Custom); - if (Subtarget.hasCheriISAv9Semantics() && - !RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())) { + if (!RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())) { setOperationAction(ISD::PTRTOINT, XLenVT, Custom); setOperationAction(ISD::INTTOPTR, CLenVT, Custom); } @@ -6909,8 +6908,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op, case ISD::INTTOPTR: { SDValue Op0 = Op.getOperand(0); if (Op.getValueType().isFatPointer()) { - assert(Subtarget.hasCheriISAv9Semantics() && - !RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())); + assert(!RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())); if (isNullConstant(Op0)) { // Do not custom lower (inttoptr 0) here as that is the canonical // representation of capability NULL, and expanding it here disables @@ -6936,8 +6934,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op, case ISD::PTRTOINT: { SDValue Op0 = Op.getOperand(0); if (Op0.getValueType().isFatPointer()) { - assert(Subtarget.hasCheriISAv9Semantics() && - !RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())); + assert(!RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())); // In purecap ptrtoint is lowered to an address read using a tablegen // pattern, but for hybrid mode we need to emit the expansion here as // CToPtr is no longer part of ISAv9. @@ -10064,22 +10061,15 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, Index); } case Intrinsic::cheri_cap_from_pointer: - // Expand CFromPtr if the dedicated instruction has been removed. - if (Subtarget.hasCheriISAv9Semantics()) { - return emitCFromPtrReplacement(DAG, DL, Op.getOperand(1), - Op.getOperand(2), Op.getValueType(), - XLenVT); - } - break; + // Expand CFromPtr since the dedicated instruction has been removed. + return emitCFromPtrReplacement(DAG, DL, Op.getOperand(1), Op.getOperand(2), + Op.getValueType(), XLenVT); case Intrinsic::cheri_cap_to_pointer: - // Expand CToPtr if the dedicated instruction has been removed. - if (Subtarget.hasCheriISAv9Semantics()) { - // NB: DDC/PCC relocation has been removed, so we no longer subtract the - // base of the authorizing capability. This is consistent with the - // behaviour of Morello's CVT instruction when CCTLR.DDCBO is off. 
- return emitCToPtrReplacement(DAG, DL, Op->getOperand(2), XLenVT); - } - break; + // Expand CToPtr since the dedicated instruction has been removed. + // NB: DDC/PCC relocation has been removed, so we no longer subtract the + // base of the authorizing capability. This is consistent with the + // behaviour of Morello's CVT instruction when CCTLR.DDCBO is off. + return emitCToPtrReplacement(DAG, DL, Op->getOperand(2), XLenVT); case Intrinsic::thread_pointer: { MCPhysReg PhysReg = RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()) ? RISCV::C4 : RISCV::X4; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index 539b8518faa8d..25464e55d61a7 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -1814,39 +1814,7 @@ bool RISCVInstrInfo::isSetBoundsInstr(const MachineInstr &I, } bool RISCVInstrInfo::isGuaranteedNotToTrap(const llvm::MachineInstr &MI) const { - const RISCVSubtarget &ST = MI.getMF()->getSubtarget(); - // TODO: This function can be removed once ISAv8 semantics are no longer - // supported and the tablegen definitions have been updated to remove the - // mayTrap/@traps_if_sealed flags. - if (ST.hasCheriISAv9Semantics()) { - // All these instructions were changed to non-trapping. - switch (MI.getOpcode()) { - case RISCV::CAndPerm: - case RISCV::CBuildCap: - case RISCV::CCopyType: - case RISCV::CCSeal: - case RISCV::CFromPtr: - case RISCV::CIncOffset: - case RISCV::CIncOffsetImm: - case RISCV::CSeal: - case RISCV::CSealEntry: - case RISCV::CSetAddr: - case RISCV::CSetBounds: - case RISCV::CSetBoundsExact: - case RISCV::CSetBoundsImm: - case RISCV::CSetFlags: - case RISCV::CSetHigh: - case RISCV::CSetOffset: - case RISCV::CToPtr: - case RISCV::CUnseal: - return true; - default: - llvm_unreachable("Unexpected instruction in isGuaranteedNotToTrap"); - return false; - } - } - if (isGuaranteedValidSetBounds(MI)) - return true; + llvm_unreachable("Should not be called for any CHERI-RISC-V instructions"); return false; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td index 84455c9d3507f..dc1524716a230 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td @@ -405,12 +405,9 @@ let Predicates = [HasCheriot] in { //===----------------------------------------------------------------------===// let Predicates = [HasCheri] in { -let Constraints = "@traps_if_sealed $rs1" in { -let mayTrap = 1 in { let defsCanBeSealed = 1 in def CSeal : Cheri_rr<0xb, "cseal", GPCR, GPCR>; def CUnseal : Cheri_rr<0xc, "cunseal", GPCR, GPCR>; -} // let mayTrap = 1 def CAndPerm : Cheri_rr<0xd, "candperm">; def CSetFlags : Cheri_rr<0xe, "csetflags">; def CSetOffset : Cheri_rr<0xf, "csetoffset">; @@ -420,16 +417,11 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1 in def CIncOffset : Cheri_rr<0x11, "cincoffset">; let isReMaterializable = 1, isAsCheapAsAMove = 1 in def CIncOffsetImm : Cheri_ri<0x1, "cincoffset", 1>; -// Note: mayTrap is optimized in RISCVInstrInfo::isGuaranteedNotToTrap() -let mayTrap = 1 in { def CSetBounds : Cheri_rr<0x8, "csetbounds">; def CSetBoundsExact : Cheri_rr<0x9, "csetboundsexact">; def CSetBoundsImm : Cheri_ri<0x2, "csetbounds", 0>; def CSetBoundsRoundDown : Cheri_rr<0xA, "csetboundsrounddown">; -} // mayTrap = 1 -} // let Constraints = "@traps_if_sealed $rs1" def CClearTag : Cheri_r<0xb, "ccleartag", GPCR>; -let mayTrap = 1 in { let defsCanBeSealed = 1 in def CBuildCap : 
Cheri_rr<0x1d, "cbuildcap", GPCR, GPCR, GPCRC0IsDDC>; def CCopyType : Cheri_rr<0x1e, "ccopytype", GPCR, GPCR>; @@ -437,7 +429,6 @@ let defsCanBeSealed = 1 in def CCSeal : Cheri_rr<0x1f, "ccseal", GPCR, GPCR>; let defsCanBeSealed = 1 in def CSealEntry : Cheri_r<0x11, "csealentry", GPCR>; -} // let mayTrap = 1 def : InstAlias<"cincoffsetimm $cd, $cs1, $imm", (CIncOffsetImm GPCR:$cd, GPCR:$cs1, simm12:$imm), 0>; @@ -486,10 +477,6 @@ let Predicates = [HasCheriot] in { // Pointer-Arithmetic Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasCheri, NotCheriISAv9], mayTrap = 1 in { -def CToPtr : Cheri_rr<0x12, "ctoptr", GPR, GPCRC0IsDDC>; -def CFromPtr : Cheri_rr<0x13, "cfromptr", GPCR, GPR, GPCRC0IsDDC>; -} let Predicates = [HasCheri] in { let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0, isAsmParserOnly = 1 in @@ -1471,17 +1458,6 @@ def : Pat<(CapFrameAddrRegImm GPCR:$rs1, simm12:$imm12), // int_cheri_cap_to_pointer and int_cheri_cap_from_pointer are expanded in C++ // code for ISAv9, so we don't need replacement patterns here. -// TODO: these intrinsics should probably be removed at some point. -let Predicates = [HasCheri, NotCheriISAv9] in { -def : Pat<(XLenVT (int_cheri_cap_to_pointer GPCR:$rs1, GPCRC0IsDDC:$rs2)), - (CToPtr GPCR:$rs1, GPCRC0IsDDC:$rs2)>; -def : Pat<(XLenVT (int_cheri_cap_to_pointer (int_cheri_ddc_get), GPCR:$rs1)), - (CToPtr $rs1, DDC)>; -def : Pat<(int_cheri_cap_from_pointer GPCRC0IsDDC:$rs1, (XLenVT GPR:$rs2)), - (CFromPtr GPCRC0IsDDC:$rs1, GPR:$rs2)>; -def : Pat<(int_cheri_cap_from_pointer (int_cheri_ddc_get), (XLenVT GPR:$rs2)), - (CFromPtr DDC, $rs2)>; -} // let Predicates = [HasCheri, NotCheriISAv9] def : Pat<(XLenVT (int_cheri_cap_diff GPCR:$cs1, GPCR:$cs2)), (SUB (XLenVT (EXTRACT_SUBREG GPCR:$cs1, sub_cap_addr)), (XLenVT (EXTRACT_SUBREG GPCR:$cs2, sub_cap_addr)))>; @@ -1492,11 +1468,6 @@ def : Pat<(inttoptr simm12:$imm12), (CIncOffsetImm C0, simm12:$imm12)>; def : Pat<(XLenVT (ptrtoint GPCR:$rs1)), (PseudoCGetAddr GPCR:$rs1)>; } -let Predicates = [NotPureCapABI, NotCheriISAv9] in { -def : Pat<(inttoptr (XLenVT GPR:$rs2)), (CFromPtr DDC, (XLenVT GPR:$rs2))>; -def : Pat<(XLenVT (ptrtoint GPCR:$rs1)), (CToPtr GPCR:$rs1, DDC)>; -} - /// Null Capability Patterns def : Pat<(inttoptr (XLenVT 0)), (CLenVT (COPY C0))>; diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/gvn-capability-store-to-load-fwd.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/gvn-capability-store-to-load-fwd.ll index 285334f843f2e..8ceb387ec4487 100644 --- a/llvm/test/CodeGen/CHERI-Generic/Inputs/gvn-capability-store-to-load-fwd.ll +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/gvn-capability-store-to-load-fwd.ll @@ -22,8 +22,7 @@ target datalayout = "@PURECAP_DATALAYOUT@" define i32 @first_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { %stackval = alloca %struct.addrinfo, align @CAP_BYTES@, addrspace(200) %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 0 - %as_cap = bitcast ptr addrspace(200) %stackval to ptr addrspace(200) - store ptr addrspace(200) %arg, ptr addrspace(200) %as_cap, align @CAP_BYTES@ + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align @CAP_BYTES@ %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } @@ -31,8 +30,7 @@ define i32 @first_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_a define i32 @second_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr 
addrspace(200) nounwind { %stackval = alloca %struct.addrinfo, align @CAP_BYTES@, addrspace(200) %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 1 - %as_cap = bitcast ptr addrspace(200) %stackval to ptr addrspace(200) - store ptr addrspace(200) %arg, ptr addrspace(200) %as_cap, align @CAP_BYTES@ + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align @CAP_BYTES@ %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } @@ -40,8 +38,7 @@ define i32 @second_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_ define i32 @third_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { %stackval = alloca %struct.addrinfo, align @CAP_BYTES@, addrspace(200) %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 2 - %as_cap = bitcast ptr addrspace(200) %stackval to ptr addrspace(200) - store ptr addrspace(200) %arg, ptr addrspace(200) %as_cap, align @CAP_BYTES@ + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align @CAP_BYTES@ %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } @@ -49,8 +46,7 @@ define i32 @third_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_a define i32 @fourth_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { %stackval = alloca %struct.addrinfo, align @CAP_BYTES@, addrspace(200) %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 3 - %as_cap = bitcast ptr addrspace(200) %stackval to ptr addrspace(200) - store ptr addrspace(200) %arg, ptr addrspace(200) %as_cap, align @CAP_BYTES@ + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align @CAP_BYTES@ %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/machinelicm-hoist-csetbounds.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/machinelicm-hoist-csetbounds.ll index db36f6b731eb7..c66c03d472388 100644 --- a/llvm/test/CodeGen/CHERI-Generic/Inputs/machinelicm-hoist-csetbounds.ll +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/machinelicm-hoist-csetbounds.ll @@ -30,9 +30,7 @@ declare ptr addrspace(200) @llvm.cheri.cap.bounds.set.iCAPRANGE(ptr addrspace(20 define dso_local void @hoist_csetbounds(i32 signext %cond, ptr addrspace(200) %f) local_unnamed_addr addrspace(200) nounwind { entry: %tobool = icmp eq ptr addrspace(200) %f, null - %0 = bitcast ptr addrspace(200) %f to ptr addrspace(200) %dst = getelementptr inbounds %struct.foo, ptr addrspace(200) %f, i64 0, i32 1 - %1 = bitcast ptr addrspace(200) %dst to ptr addrspace(200) br label %for.body for.cond.cleanup: ; preds = %for.inc @@ -43,10 +41,8 @@ for.body: ; preds = %for.inc, %entry br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %2 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.iCAPRANGE(ptr addrspace(200) nonnull %0, iCAPRANGE 4) - %address.with.bounds = bitcast ptr addrspace(200) %2 to ptr addrspace(200) - %3 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.iCAPRANGE(ptr addrspace(200) nonnull %1, iCAPRANGE 4) - %address.with.bounds1 = bitcast ptr addrspace(200) %3 to ptr addrspace(200) + %address.with.bounds = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.iCAPRANGE(ptr addrspace(200) nonnull %f, iCAPRANGE 4) + %address.with.bounds1 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.iCAPRANGE(ptr addrspace(200) nonnull %dst, iCAPRANGE 4) call void @call(ptr addrspace(200) 
%address.with.bounds, ptr addrspace(200) %address.with.bounds1) br label %for.inc diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/stack-bounds-dynamic-alloca.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/stack-bounds-dynamic-alloca.ll index d8266a77dffe7..8ada6b8340eb4 100644 --- a/llvm/test/CodeGen/CHERI-Generic/Inputs/stack-bounds-dynamic-alloca.ll +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/stack-bounds-dynamic-alloca.ll @@ -24,8 +24,7 @@ do_alloca: ; preds = %entry br label %use_alloca_no_bounds use_alloca_no_bounds: ; preds = %do_alloca - %ptr = bitcast ptr addrspace(200) %alloca to ptr addrspace(200) - %ptr_plus_one = getelementptr i64, ptr addrspace(200) %ptr, i64 1 + %ptr_plus_one = getelementptr i64, ptr addrspace(200) %alloca, i64 1 store i64 1234, ptr addrspace(200) %ptr_plus_one, align 8 br label %use_alloca_need_bounds @@ -47,8 +46,7 @@ do_alloca: ; preds = %entry br label %use_alloca_no_bounds use_alloca_no_bounds: ; preds = %do_alloca - %ptr = bitcast ptr addrspace(200) %alloca to ptr addrspace(200) - %ptr_plus_one = getelementptr i64, ptr addrspace(200) %ptr, i64 1 + %ptr_plus_one = getelementptr i64, ptr addrspace(200) %alloca, i64 1 store i64 1234, ptr addrspace(200) %ptr_plus_one, align 8 br label %use_alloca_need_bounds diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-rmw-cap-ptr-arg.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-rmw-cap-ptr-arg.ll index d942c6ab7c59a..8eec013e2b428 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-rmw-cap-ptr-arg.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-rmw-cap-ptr-arg.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/atomic-rmw-cap-ptr-arg.ll ; Check that we can generate sensible code for atomic operations using capability pointers on capabilities ; See https://github.com/CTSRD-CHERI/llvm-project/issues/470 diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-rmw-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-rmw-cap-ptr.ll index 72e6aa8853678..d003397912343 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-rmw-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-rmw-cap-ptr.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/atomic-rmw-cap-ptr.ll ; Check that we can generate sensible code for atomic operations using capability pointers ; https://github.com/CTSRD-CHERI/llvm-project/issues/470 diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/bounded-allocas-lifetimes.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/bounded-allocas-lifetimes.ll index 934d2fc756c57..85833b334a878 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/bounded-allocas-lifetimes.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/bounded-allocas-lifetimes.ll @@ -1,5 +1,6 @@ -; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2 ; DO NOT EDIT -- This file was generated from 
test/CodeGen/CHERI-Generic/Inputs/bounded-allocas-lifetimes.ll +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; CHERI-GENERIC-UTC: mir ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - --stop-after=finalize-isel | FileCheck %s diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/cap-from-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/cap-from-ptr.ll index 261e114501a25..e643640e63caf 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/cap-from-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/cap-from-ptr.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cap-from-ptr.ll ;; Check that we can correctly generate code for llvm.cheri.cap.from.pointer() ;; This previously asserted on RISC-V due to a broken ISel pattern. @@ -7,7 +7,7 @@ ; RUN: opt -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap -passes=instcombine -S < %s | llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap | FileCheck %s --check-prefix=PURECAP ; RUN: opt -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 -passes=instcombine -S < %s | llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 | FileCheck %s --check-prefix=HYBRID -define internal i8 addrspace(200)* @test(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %cap, i64 %offset) addrspace(200) nounwind { +define internal ptr addrspace(200) @test(ptr addrspace(200) %ptr, ptr addrspace(200) %cap, i64 %offset) addrspace(200) nounwind { ; PURECAP-LABEL: test: ; PURECAP: # %bb.0: # %entry ; PURECAP-NEXT: cfromptr $c1, $c4, $4 @@ -21,7 +21,7 @@ define internal i8 addrspace(200)* @test(i8 addrspace(200)* addrspace(200)* %ptr ; HYBRID-NEXT: csc $c1, $zero, 0($c3) ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: cmove $c3, $c1 -; CHECK-IR-LABEL: define {{[^@]+}}@test +; CHECK-IR-LABEL: define internal ptr addrspace(200) @test ; CHECK-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], ptr addrspace(200) [[CAP:%.*]], i64 [[OFFSET:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: [[NEW:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) [[CAP]], i64 [[OFFSET]]) @@ -29,13 +29,13 @@ define internal i8 addrspace(200)* @test(i8 addrspace(200)* addrspace(200)* %ptr ; CHECK-IR-NEXT: ret ptr addrspace(200) [[NEW]] ; entry: - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)* %cap, i64 %offset) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) %cap, i64 %offset) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; (int_cheri_cap_from_ptr x, 0) -> null -define internal i8 addrspace(200)* @cap_from_ptr_zero(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %cap) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_zero(ptr addrspace(200) %ptr, ptr addrspace(200) %cap) nounwind { ; PURECAP-LABEL: cap_from_ptr_zero: ; PURECAP: # %bb.0: # %entry ; PURECAP-NEXT: csc 
$cnull, $zero, 0($c3) @@ -47,20 +47,20 @@ define internal i8 addrspace(200)* @cap_from_ptr_zero(i8 addrspace(200)* addrspa ; HYBRID-NEXT: csc $cnull, $zero, 0($c3) ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: cgetnull $c3 -; CHECK-IR-LABEL: define {{[^@]+}}@cap_from_ptr_zero +; CHECK-IR-LABEL: define internal ptr addrspace(200) @cap_from_ptr_zero ; CHECK-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], ptr addrspace(200) [[CAP:%.*]]) addrspace(200) #[[ATTR0]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: store ptr addrspace(200) null, ptr addrspace(200) [[PTR]], align 16 ; CHECK-IR-NEXT: ret ptr addrspace(200) null ; entry: - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)* %cap, i64 0) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) %cap, i64 0) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; Check that (int_cheri_cap_from_ptr ddc, x) can use the DDC register directly -define internal i8 addrspace(200)* @cap_from_ptr_ddc(i8 addrspace(200)* addrspace(200)* %ptr, i64 %offset) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_ddc(ptr addrspace(200) %ptr, i64 %offset) nounwind { ; PURECAP-LABEL: cap_from_ptr_ddc: ; PURECAP: # %bb.0: # %entry ; PURECAP-NEXT: cfromddc $c1, $4 @@ -74,7 +74,7 @@ define internal i8 addrspace(200)* @cap_from_ptr_ddc(i8 addrspace(200)* addrspac ; HYBRID-NEXT: csc $c1, $zero, 0($c3) ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: cmove $c3, $c1 -; CHECK-IR-LABEL: define {{[^@]+}}@cap_from_ptr_ddc +; CHECK-IR-LABEL: define internal ptr addrspace(200) @cap_from_ptr_ddc ; CHECK-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[OFFSET:%.*]]) addrspace(200) #[[ATTR0]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: [[DDC:%.*]] = call ptr addrspace(200) @llvm.cheri.ddc.get() @@ -83,14 +83,14 @@ define internal i8 addrspace(200)* @cap_from_ptr_ddc(i8 addrspace(200)* addrspac ; CHECK-IR-NEXT: ret ptr addrspace(200) [[NEW]] ; entry: - %ddc = call i8 addrspace(200)* @llvm.cheri.ddc.get() - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)* %ddc, i64 %offset) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %ddc = call ptr addrspace(200) @llvm.cheri.ddc.get() + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) %ddc, i64 %offset) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; Check that (int_cheri_cap_from_ptr x, 0) -> null has priority over direct DDC usage -define internal i8 addrspace(200)* @cap_from_ptr_ddc_zero(i8 addrspace(200)* addrspace(200)* %ptr) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_ddc_zero(ptr addrspace(200) %ptr) nounwind { ; PURECAP-LABEL: cap_from_ptr_ddc_zero: ; PURECAP: # %bb.0: # %entry ; PURECAP-NEXT: csc $cnull, $zero, 0($c3) @@ -102,21 +102,21 @@ define internal i8 addrspace(200)* @cap_from_ptr_ddc_zero(i8 addrspace(200)* add ; HYBRID-NEXT: csc $cnull, $zero, 0($c3) ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: cgetnull $c3 -; CHECK-IR-LABEL: define {{[^@]+}}@cap_from_ptr_ddc_zero +; CHECK-IR-LABEL: define internal ptr addrspace(200) @cap_from_ptr_ddc_zero ; CHECK-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) addrspace(200) #[[ATTR0]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: store ptr addrspace(200) null, ptr addrspace(200) 
[[PTR]], align 16 ; CHECK-IR-NEXT: ret ptr addrspace(200) null ; entry: - %ddc = call i8 addrspace(200)* @llvm.cheri.ddc.get() - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)* %ddc, i64 0) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %ddc = call ptr addrspace(200) @llvm.cheri.ddc.get() + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) %ddc, i64 0) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; Check that (int_cheri_cap_from_ptr null, x) does not use register zero (since that is DDC) -define internal i8 addrspace(200)* @cap_from_ptr_null(i8 addrspace(200)* addrspace(200)* %ptr, i64 %offset) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_null(ptr addrspace(200) %ptr, i64 %offset) nounwind { ; PURECAP-LABEL: cap_from_ptr_null: ; PURECAP: # %bb.0: # %entry ; PURECAP-NEXT: cgetnull $c1 @@ -132,7 +132,7 @@ define internal i8 addrspace(200)* @cap_from_ptr_null(i8 addrspace(200)* addrspa ; HYBRID-NEXT: csc $c1, $zero, 0($c3) ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: cmove $c3, $c1 -; CHECK-IR-LABEL: define {{[^@]+}}@cap_from_ptr_null +; CHECK-IR-LABEL: define internal ptr addrspace(200) @cap_from_ptr_null ; CHECK-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[OFFSET:%.*]]) addrspace(200) #[[ATTR0]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: [[NEW:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) null, i64 [[OFFSET]]) @@ -140,10 +140,10 @@ define internal i8 addrspace(200)* @cap_from_ptr_null(i8 addrspace(200)* addrspa ; CHECK-IR-NEXT: ret ptr addrspace(200) [[NEW]] ; entry: - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)* null, i64 %offset) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) null, i64 %offset) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } -declare i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)*, i64) -declare i8 addrspace(200)* @llvm.cheri.ddc.get() +declare ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200), i64) +declare ptr addrspace(200) @llvm.cheri.ddc.get() diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-csub.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-csub.ll index e54270445f00e..2303095e540bc 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-csub.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-csub.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-csub.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 %s -o - | FileCheck %s --check-prefix=HYBRID ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - | FileCheck %s --check-prefix=PURECAP diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-intrinsics-folding-broken-module-regression.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-intrinsics-folding-broken-module-regression.ll index 
6af447ffa7aae..0fb65051c24d6 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-intrinsics-folding-broken-module-regression.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-intrinsics-folding-broken-module-regression.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 3 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-intrinsics-folding-broken-module-regression.ll ; This used to create a broken function. ; FIXME: the getoffset+add sequence should be folded to an increment diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-memfn-call.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-memfn-call.ll index 5a2f46cec882e..337932078cd9e 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-memfn-call.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-memfn-call.ll @@ -1,13 +1,13 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-memfn-call.ll ; Check that we call memset_c/memmove_c/memcpy_c in hybrid mode. ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - | FileCheck %s --check-prefix=PURECAP ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 %s -o - | FileCheck %s --check-prefix=HYBRID %struct.x = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } -declare void @llvm.memmove.p200i8.p200i8.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) -declare void @llvm.memset.p200i8.i64(ptr addrspace(200) nocapture, i8, i64, i1) -declare void @llvm.memcpy.p200i8.p200i8.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) +declare void @llvm.memmove.p200.p200.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) +declare void @llvm.memset.p200.i64(ptr addrspace(200) nocapture, i8, i64, i1) +declare void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) define void @call_memset(ptr addrspace(200) align 4 %dst) nounwind { ; PURECAP-LABEL: call_memset: @@ -44,7 +44,7 @@ define void @call_memset(ptr addrspace(200) align 4 %dst) nounwind { ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: daddiu $sp, $sp, 16 entry: - call void @llvm.memset.p200i8.i64(ptr addrspace(200) align 4 %dst, i8 0, i64 40, i1 false) + call void @llvm.memset.p200.i64(ptr addrspace(200) align 4 %dst, i8 0, i64 40, i1 false) ret void } @@ -81,7 +81,7 @@ define void @call_memcpy(ptr addrspace(200) align 4 %dst, ptr addrspace(200) ali ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: daddiu $sp, $sp, 16 entry: - call void @llvm.memcpy.p200i8.p200i8.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) + call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) ret void } @@ -118,7 +118,7 @@ define void @call_memmove(ptr addrspace(200) align 4 %dst, ptr addrspace(200) al ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: daddiu $sp, $sp, 16 entry: - call void @llvm.memmove.p200i8.p200i8.i64(ptr addrspace(200) align 4 
%dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) + call void @llvm.memmove.p200.p200.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) ret void } diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-pointer-comparison.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-pointer-comparison.ll index e59c884c4e2bc..0636df51347c6 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-pointer-comparison.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/cheri-pointer-comparison.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-pointer-comparison.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 %s -o - | FileCheck %s --check-prefix=HYBRID ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - | FileCheck %s --check-prefix=PURECAP diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/dagcombine-ptradd-deleted-regression.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/dagcombine-ptradd-deleted-regression.ll index e79c504621bc3..e87f1beed7c50 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/dagcombine-ptradd-deleted-regression.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/dagcombine-ptradd-deleted-regression.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/dagcombine-ptradd-deleted-regression.ll ; This would previously crash DAGCombiner::visitPTRADD since the PTRADD ; corresponding to the second GEP would be collapsed to a no-op when diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/function-alias-size.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/function-alias-size.ll index 9d51b2614eeee..a2d9952a5ec3e 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/function-alias-size.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/function-alias-size.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/function-alias-size.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - < %s | FileCheck %s --check-prefix=ASM ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - -filetype=obj < %s | llvm-objdump --syms -r - | FileCheck %s --check-prefix=OBJDUMP diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/gvn-capability-store-to-load-fwd.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/gvn-capability-store-to-load-fwd.ll index a557a8a492c80..4bcbf4c120207 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/gvn-capability-store-to-load-fwd.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/gvn-capability-store-to-load-fwd.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: 
--function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/gvn-capability-store-to-load-fwd.ll ; Check that GVN does not attempt to read capability fields that it can't get the bits for ; This is https://github.com/CTSRD-CHERI/llvm-project/issues/385 @@ -18,11 +18,10 @@ target datalayout = "E-m:e-pf200:128:128:128:64-i8:8:32-i16:16:32-i64:64-n32:64-S128-A200-P200-G200" -%0 = type { i8, i8, [14 x i8] } -%struct.addrinfo = type { i32, i32, i32, i32, i32, i8 addrspace(200)*, %0 addrspace(200)*, %struct.addrinfo addrspace(200)* } +%struct.addrinfo = type { i32, i32, i32, i32, i32, ptr addrspace(200), ptr addrspace(200), ptr addrspace(200) } -define i32 @first_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @first_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: first_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset $c11, $c11, -80 @@ -31,7 +30,7 @@ define i32 @first_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; ASM-NEXT: cincoffset $c11, $c11, 80 ; ASM-NEXT: cjr $c17 ; ASM-NEXT: nop -; CHECK-LABEL: define {{[^@]+}}@first_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @first_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 16, addrspace(200) ; CHECK-NEXT: store ptr addrspace(200) [[ARG]], ptr addrspace(200) [[STACKVAL]], align 16 @@ -39,14 +38,13 @@ define i32 @first_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; CHECK-NEXT: ret i32 [[RESULT]] ; %stackval = alloca %struct.addrinfo, align 16, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 0 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 16 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 0 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 16 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } -define i32 @second_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @second_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: second_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset $c11, $c11, -80 @@ -55,7 +53,7 @@ define i32 @second_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; ASM-NEXT: cincoffset $c11, $c11, 80 ; ASM-NEXT: cjr $c17 ; ASM-NEXT: nop -; CHECK-LABEL: define {{[^@]+}}@second_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @second_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 16, addrspace(200) ; CHECK-NEXT: [[FIELD:%.*]] = getelementptr inbounds [[STRUCT_ADDRINFO]], ptr addrspace(200) [[STACKVAL]], i64 0, i32 1 @@ -64,14 +62,13 @@ define i32 @second_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; CHECK-NEXT: ret i32 
[[RESULT]] ; %stackval = alloca %struct.addrinfo, align 16, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 1 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 16 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 1 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 16 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } -define i32 @third_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @third_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: third_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset $c11, $c11, -80 @@ -80,7 +77,7 @@ define i32 @third_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; ASM-NEXT: cincoffset $c11, $c11, 80 ; ASM-NEXT: cjr $c17 ; ASM-NEXT: nop -; CHECK-LABEL: define {{[^@]+}}@third_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @third_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 16, addrspace(200) ; CHECK-NEXT: [[FIELD:%.*]] = getelementptr inbounds [[STRUCT_ADDRINFO]], ptr addrspace(200) [[STACKVAL]], i64 0, i32 2 @@ -89,14 +86,13 @@ define i32 @third_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; CHECK-NEXT: ret i32 [[RESULT]] ; %stackval = alloca %struct.addrinfo, align 16, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 2 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 16 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 2 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 16 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } -define i32 @fourth_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @fourth_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: fourth_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset $c11, $c11, -80 @@ -105,7 +101,7 @@ define i32 @fourth_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; ASM-NEXT: cincoffset $c11, $c11, 80 ; ASM-NEXT: cjr $c17 ; ASM-NEXT: nop -; CHECK-LABEL: define {{[^@]+}}@fourth_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @fourth_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 16, addrspace(200) ; CHECK-NEXT: [[FIELD:%.*]] = getelementptr inbounds [[STRUCT_ADDRINFO]], ptr addrspace(200) [[STACKVAL]], i64 0, i32 3 @@ -114,9 +110,8 @@ define i32 @fourth_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; CHECK-NEXT: ret i32 [[RESULT]] ; %stackval = alloca %struct.addrinfo, align 16, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, 
%struct.addrinfo addrspace(200)* %stackval, i64 0, i32 3 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 16 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 3 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 16 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/hoist-alloca.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/hoist-alloca.ll index 6c9145ea27288..9e4cb53774176 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/hoist-alloca.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/hoist-alloca.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/hoist-alloca.ll ; REQUIRES: asserts ; Check that we can hoist the csetbounds for a local alloca outside of loops diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/intrinsics-purecap-only.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/intrinsics-purecap-only.ll index efe65526d753e..b1abe4cd9e24b 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/intrinsics-purecap-only.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/intrinsics-purecap-only.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/intrinsics-purecap-only.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap < %s -o - | FileCheck %s --check-prefix=PURECAP ; RUN: not --crash llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 < %s -o - 2>&1 | FileCheck %s --check-prefix HYBRID-ERROR diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/intrinsics.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/intrinsics.ll index 37faa5a7858e5..e62da5c3a93ca 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/intrinsics.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/intrinsics.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/intrinsics.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - < %s | FileCheck %s --check-prefix=PURECAP ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 -o - < %s | FileCheck %s --check-prefix=HYBRID diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/machinelicm-hoist-csetbounds.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/machinelicm-hoist-csetbounds.ll index 9042d02dc424b..f66c83f1d3f69 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/machinelicm-hoist-csetbounds.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/machinelicm-hoist-csetbounds.ll @@ 
-1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 3 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/machinelicm-hoist-csetbounds.ll ; Previously LLVM would hoist CSetBounds instructions out of if conditions/loops ; even if the source pointer could be NULL. On MIPS and RISC-V this results in a @@ -78,22 +78,20 @@ define dso_local void @hoist_csetbounds(i32 signext %cond, ptr addrspace(200) %f ; HOIST-OPT-NEXT: br i1 [[TOBOOL]], label [[FOR_COND_CLEANUP:%.*]], label [[ENTRY_SPLIT:%.*]] ; HOIST-OPT: entry.split: ; HOIST-OPT-NEXT: [[DST:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(200) [[F]], i64 4 -; HOIST-OPT-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull [[F]], i64 4) -; HOIST-OPT-NEXT: [[TMP1:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull [[DST]], i64 4) +; HOIST-OPT-NEXT: [[ADDRESS_WITH_BOUNDS:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull [[F]], i64 4) +; HOIST-OPT-NEXT: [[ADDRESS_WITH_BOUNDS1:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull [[DST]], i64 4) ; HOIST-OPT-NEXT: br label [[FOR_BODY:%.*]] ; HOIST-OPT: for.cond.cleanup: ; HOIST-OPT-NEXT: ret void ; HOIST-OPT: for.body: ; HOIST-OPT-NEXT: [[I_06:%.*]] = phi i32 [ 0, [[ENTRY_SPLIT]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; HOIST-OPT-NEXT: tail call void @call(ptr addrspace(200) [[TMP0]], ptr addrspace(200) [[TMP1]]) +; HOIST-OPT-NEXT: tail call void @call(ptr addrspace(200) [[ADDRESS_WITH_BOUNDS]], ptr addrspace(200) [[ADDRESS_WITH_BOUNDS1]]) ; HOIST-OPT-NEXT: [[INC]] = add nuw nsw i32 [[I_06]], 1 ; HOIST-OPT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], 100 ; HOIST-OPT-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] entry: %tobool = icmp eq ptr addrspace(200) %f, null - %0 = bitcast ptr addrspace(200) %f to ptr addrspace(200) %dst = getelementptr inbounds %struct.foo, ptr addrspace(200) %f, i64 0, i32 1 - %1 = bitcast ptr addrspace(200) %dst to ptr addrspace(200) br label %for.body for.cond.cleanup: ; preds = %for.inc @@ -104,10 +102,8 @@ for.body: ; preds = %for.inc, %entry br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %2 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull %0, i64 4) - %address.with.bounds = bitcast ptr addrspace(200) %2 to ptr addrspace(200) - %3 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull %1, i64 4) - %address.with.bounds1 = bitcast ptr addrspace(200) %3 to ptr addrspace(200) + %address.with.bounds = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull %f, i64 4) + %address.with.bounds1 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull %dst, i64 4) call void @call(ptr addrspace(200) %address.with.bounds, ptr addrspace(200) %address.with.bounds1) br label %for.inc diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-from-constant.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-from-constant.ll index fd4dd1a783852..9f7f18c1a8e66 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-from-constant.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-from-constant.ll @@ -1,4 +1,4 @@ -; 
NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-from-constant.ll ;; Copying from a zero constant can be converted to a memset (even with the tag preservation flags) ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap < %s -o - | FileCheck %s diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-no-preserve-tags-attr.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-no-preserve-tags-attr.ll index f7d652b51381b..b97502b3f3e9b 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-no-preserve-tags-attr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-no-preserve-tags-attr.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-no-preserve-tags-attr.ll ; Check that the no_preserve_tags annotation on memcpy/memmove intrinsics allows ; use to inline struct copies >= capability size. diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-preserve-tags-size-not-multiple.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-preserve-tags-size-not-multiple.ll index f3a7da84d2347..ba7b9e3ee3afb 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-preserve-tags-size-not-multiple.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-preserve-tags-size-not-multiple.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-preserve-tags-size-not-multiple.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap -o - -O0 -verify-machineinstrs %s | FileCheck %s -check-prefixes CHECK ; Check that we can inline memmove/memcpy despite having the must_preserve_cheri_tags property and the size not diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-zeroinit.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-zeroinit.ll index 55615e506f4ac..9d955dbdd9b49 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-zeroinit.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/memcpy-zeroinit.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-zeroinit.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap < %s -o - | FileCheck %s ; Check that the copy from the zeroinitializer global is turned into a series of zero stores diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/optsize-preserve-tags-memcpy-crash.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/optsize-preserve-tags-memcpy-crash.ll index aafd522343d54..08ea9a3ba505d 100644 
--- a/llvm/test/CodeGen/CHERI-Generic/MIPS/optsize-preserve-tags-memcpy-crash.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/optsize-preserve-tags-memcpy-crash.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/optsize-preserve-tags-memcpy-crash.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap < %s -o - | FileCheck %s ; The following code copying 31 bytes (with capability alignment) using the diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/ptrtoint.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/ptrtoint.ll index 7be4cade3dcf0..fff632d6991e5 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/ptrtoint.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/ptrtoint.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/ptrtoint.ll ;; Check that we can correctly generate code for ptrtoint and perform simple folds ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap < %s | FileCheck %s diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/setoffset-multiple-uses.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/setoffset-multiple-uses.ll index 29ee6b675afa1..09c9056ebe9f3 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/setoffset-multiple-uses.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/setoffset-multiple-uses.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 3 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/setoffset-multiple-uses.ll ; RUN: opt -S -passes=instcombine -o - %s | FileCheck %s ; RUN: opt -S -passes=instcombine -o - %s | llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap -O1 - -o - | %cheri_FileCheck %s --check-prefix ASM diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-dynamic-alloca.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-dynamic-alloca.ll index 8a2a59775ebb3..ad66cccd32dc7 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-dynamic-alloca.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-dynamic-alloca.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-bounds-dynamic-alloca.ll ; RUN: opt -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap -passes=cheri-bound-allocas -o - -S %s | FileCheck %s ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap -O0 %s -o - | FileCheck %s -check-prefix ASM @@ -79,7 +79,7 @@ define i32 @alloca_in_entry(i1 %arg) 
local_unnamed_addr addrspace(200) nounwind ; ASM-OPT-NEXT: .LBB0_2: # %exit ; ASM-OPT-NEXT: cjr $c17 ; ASM-OPT-NEXT: addiu $2, $zero, 123 -; CHECK-LABEL: define {{[^@]+}}@alloca_in_entry +; CHECK-LABEL: define i32 @alloca_in_entry ; CHECK-SAME: (i1 [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [16 x i8], align 16, addrspace(200) @@ -87,8 +87,7 @@ define i32 @alloca_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounwind ; CHECK: do_alloca: ; CHECK-NEXT: br label [[USE_ALLOCA_NO_BOUNDS:%.*]] ; CHECK: use_alloca_no_bounds: -; CHECK-NEXT: [[PTR:%.*]] = bitcast ptr addrspace(200) [[ALLOCA]] to ptr addrspace(200) -; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[PTR]], i64 1 +; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[ALLOCA]], i64 1 ; CHECK-NEXT: store i64 1234, ptr addrspace(200) [[PTR_PLUS_ONE]], align 8 ; CHECK-NEXT: br label [[USE_ALLOCA_NEED_BOUNDS:%.*]] ; CHECK: use_alloca_need_bounds: @@ -107,8 +106,7 @@ do_alloca: ; preds = %entry br label %use_alloca_no_bounds use_alloca_no_bounds: ; preds = %do_alloca - %ptr = bitcast ptr addrspace(200) %alloca to ptr addrspace(200) - %ptr_plus_one = getelementptr i64, ptr addrspace(200) %ptr, i64 1 + %ptr_plus_one = getelementptr i64, ptr addrspace(200) %alloca, i64 1 store i64 1234, ptr addrspace(200) %ptr_plus_one, align 8 br label %use_alloca_need_bounds @@ -204,7 +202,7 @@ define i32 @alloca_not_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounw ; ASM-OPT-NEXT: .LBB1_2: # %exit ; ASM-OPT-NEXT: cjr $c17 ; ASM-OPT-NEXT: addiu $2, $zero, 123 -; CHECK-LABEL: define {{[^@]+}}@alloca_not_in_entry +; CHECK-LABEL: define i32 @alloca_not_in_entry ; CHECK-SAME: (i1 [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 [[ARG]], label [[DO_ALLOCA:%.*]], label [[EXIT:%.*]] @@ -213,8 +211,7 @@ define i32 @alloca_not_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounw ; CHECK-NEXT: [[TMP0:%.*]] = call ptr addrspace(200) @llvm.cheri.bounded.stack.cap.dynamic.i64(ptr addrspace(200) [[ALLOCA]], i64 16) ; CHECK-NEXT: br label [[USE_ALLOCA_NO_BOUNDS:%.*]] ; CHECK: use_alloca_no_bounds: -; CHECK-NEXT: [[PTR:%.*]] = bitcast ptr addrspace(200) [[ALLOCA]] to ptr addrspace(200) -; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[PTR]], i64 1 +; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[ALLOCA]], i64 1 ; CHECK-NEXT: store i64 1234, ptr addrspace(200) [[PTR_PLUS_ONE]], align 8 ; CHECK-NEXT: br label [[USE_ALLOCA_NEED_BOUNDS:%.*]] ; CHECK: use_alloca_need_bounds: @@ -232,8 +229,7 @@ do_alloca: ; preds = %entry br label %use_alloca_no_bounds use_alloca_no_bounds: ; preds = %do_alloca - %ptr = bitcast ptr addrspace(200) %alloca to ptr addrspace(200) - %ptr_plus_one = getelementptr i64, ptr addrspace(200) %ptr, i64 1 + %ptr_plus_one = getelementptr i64, ptr addrspace(200) %alloca, i64 1 store i64 1234, ptr addrspace(200) %ptr_plus_one, align 8 br label %use_alloca_need_bounds @@ -327,7 +323,7 @@ define i32 @crash_reproducer(i1 %arg) local_unnamed_addr addrspace(200) nounwind ; ASM-OPT-NEXT: cincoffset $c11, $c11, 32 ; ASM-OPT-NEXT: .LBB2_2: # %entry.while.end_crit_edge ; ASM-OPT-NEXT: .insn -; CHECK-LABEL: define {{[^@]+}}@crash_reproducer +; CHECK-LABEL: define i32 @crash_reproducer ; CHECK-SAME: (i1 [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 [[ARG]], label 
[[ENTRY_WHILE_END_CRIT_EDGE:%.*]], label [[WHILE_BODY:%.*]] diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-opaque-spill-too-early.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-opaque-spill-too-early.ll index 953ff82776765..faecff108e53c 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-opaque-spill-too-early.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-opaque-spill-too-early.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-bounds-opaque-spill-too-early.ll ;; After merging to LLVM 15 the stack bounds pass the switch to opqaue pointers caused ;; miscompilations in the stack bounding pass (the unbounded value was used instead of @@ -54,7 +54,7 @@ define dso_local void @lazy_bind_args() addrspace(200) nounwind { ; ASM-NEXT: clc $c18, $zero, 32($c11) # 16-byte Folded Reload ; ASM-NEXT: cjr $c17 ; ASM-NEXT: cincoffset $c11, $c11, 48 -; CHECK-LABEL: define {{[^@]+}}@lazy_bind_args +; CHECK-LABEL: define dso_local void @lazy_bind_args ; CHECK-SAME: () addrspace(200) #[[ATTR2:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CAP:%.*]] = alloca ptr addrspace(200), align 16, addrspace(200) diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-pass-phi.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-pass-phi.ll index 38a0147fff16f..01f926c3570e4 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-pass-phi.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-bounds-pass-phi.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-bounds-pass-phi.ll ; REQUIRES: asserts ; RUN: opt -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap -passes=cheri-bound-allocas %s -o - -S -cheri-stack-bounds=if-needed \ @@ -58,7 +58,7 @@ define void @test_phi(i1 %cond) addrspace(200) nounwind { ; ASM-NEXT: clc $c19, $zero, 48($c11) # 16-byte Folded Reload ; ASM-NEXT: cjr $c17 ; ASM-NEXT: cincoffset $c11, $c11, 64 -; CHECK-LABEL: define {{[^@]+}}@test_phi +; CHECK-LABEL: define void @test_phi ; CHECK-SAME: (i1 [[COND:%.*]]) addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ALLOCA1:%.*]] = alloca i32, align 4, addrspace(200) @@ -142,7 +142,7 @@ define void @test_only_created_in_predecessor_block(i1 %cond) addrspace(200) nou ; ASM-NEXT: clc $c17, $zero, 16($c11) # 16-byte Folded Reload ; ASM-NEXT: cjr $c17 ; ASM-NEXT: cincoffset $c11, $c11, 32 -; CHECK-LABEL: define {{[^@]+}}@test_only_created_in_predecessor_block +; CHECK-LABEL: define void @test_only_created_in_predecessor_block ; CHECK-SAME: (i1 [[COND:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ALLOCA1:%.*]] = alloca i32, align 4, addrspace(200) diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-spill-unnecessary.c.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-spill-unnecessary.c.ll index 3aa7fe30332a6..3d7a5da60a70e 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-spill-unnecessary.c.ll +++ 
b/llvm/test/CodeGen/CHERI-Generic/MIPS/stack-spill-unnecessary.c.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-spill-unnecessary.c.ll ; The new CheriBoundedStackPseudo instruction lets us pretend that the incoffset+csetbounds ; is a single trivially rematerizable instruction so it can freely move it around to avoid stack spills. diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/stackframe-intrinsics.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/stackframe-intrinsics.ll index 1d75463f20ee8..d3cd3cc4c8c07 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/stackframe-intrinsics.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/stackframe-intrinsics.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stackframe-intrinsics.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - < %s | FileCheck %s --check-prefix=PURECAP ; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 | FileCheck %s --check-prefix HYBRID diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/strcpy-to-memcpy-no-tags.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/strcpy-to-memcpy-no-tags.ll index cde99a48495f8..3708b25a65462 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/strcpy-to-memcpy-no-tags.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/strcpy-to-memcpy-no-tags.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/strcpy-to-memcpy-no-tags.ll ; Check that we can inline the loads/stores generated when simplifiying ; string libcalls to memcpy() (since it should be marked as non-tag-preserving). 
@@ -31,7 +31,7 @@ define void @test_strcpy_to_memcpy(ptr addrspace(200) align 8 %dst) addrspace(20 ; CHECK-ASM-NEXT: csb $2, $zero, 16($c3) ; CHECK-ASM-NEXT: cjr $c17 ; CHECK-ASM-NEXT: csd $3, $zero, 8($c3) -; CHECK-IR-LABEL: define {{[^@]+}}@test_strcpy_to_memcpy +; CHECK-IR-LABEL: define void @test_strcpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 8 [[DST:%.*]]) addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) [[DST]], ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) @str, i64 17, i1 false) #[[ATTR4:[0-9]+]] @@ -56,7 +56,7 @@ define void @test_stpcpy_to_memcpy(ptr addrspace(200) align 8 %dst) addrspace(20 ; CHECK-ASM-NEXT: csb $2, $zero, 16($c3) ; CHECK-ASM-NEXT: cjr $c17 ; CHECK-ASM-NEXT: csd $3, $zero, 8($c3) -; CHECK-IR-LABEL: define {{[^@]+}}@test_stpcpy_to_memcpy +; CHECK-IR-LABEL: define void @test_stpcpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 8 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) [[DST]], ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) @str, i64 17, i1 false) #[[ATTR5:[0-9]+]] @@ -90,7 +90,7 @@ define void @test_strcat_to_memcpy(ptr addrspace(200) align 8 %dst) addrspace(20 ; CHECK-ASM-NEXT: clc $c19, $zero, 32($c11) # 16-byte Folded Reload ; CHECK-ASM-NEXT: cjr $c17 ; CHECK-ASM-NEXT: cincoffset $c11, $c11, 48 -; CHECK-IR-LABEL: define {{[^@]+}}@test_strcat_to_memcpy +; CHECK-IR-LABEL: define void @test_strcat_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 8 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: [[STRLEN:%.*]] = call i64 @strlen(ptr addrspace(200) noundef nonnull dereferenceable(1) [[DST]]) @@ -118,7 +118,7 @@ define void @test_strncpy_to_memcpy(ptr addrspace(200) align 8 %dst) addrspace(2 ; CHECK-ASM-NEXT: csb $2, $zero, 16($c3) ; CHECK-ASM-NEXT: cjr $c17 ; CHECK-ASM-NEXT: csd $3, $zero, 8($c3) -; CHECK-IR-LABEL: define {{[^@]+}}@test_strncpy_to_memcpy +; CHECK-IR-LABEL: define void @test_strncpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 8 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) [[DST]], ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) @str, i64 17, i1 false) #[[ATTR4]] @@ -144,7 +144,7 @@ define void @test_stpncpy_to_memcpy(ptr addrspace(200) align 8 %dst) addrspace(2 ; CHECK-ASM-NEXT: csb $2, $zero, 16($c3) ; CHECK-ASM-NEXT: cjr $c17 ; CHECK-ASM-NEXT: csd $3, $zero, 8($c3) -; CHECK-IR-LABEL: define {{[^@]+}}@test_stpncpy_to_memcpy +; CHECK-IR-LABEL: define void @test_stpncpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 8 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) [[DST]], ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) @str, i64 17, i1 false) #[[ATTR4]] diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/subobject-bounds-redundant-setbounds.c.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/subobject-bounds-redundant-setbounds.c.ll index 9208d5457eb3b..95ee41380e46d 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/subobject-bounds-redundant-setbounds.c.ll +++ 
b/llvm/test/CodeGen/CHERI-Generic/MIPS/subobject-bounds-redundant-setbounds.c.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/subobject-bounds-redundant-setbounds.c.ll ; REQUIRES: asserts ; RUN: rm -f %t.dbg-opt %t.dbg-llc @@ -40,7 +40,7 @@ define void @use_inline(ptr addrspace(200) nocapture %arg) local_unnamed_addr ad ; ASM-NEXT: addiu $1, $zero, 2 ; ASM-NEXT: cjr $c17 ; ASM-NEXT: csw $1, $zero, 0($c3) -; CHECK-LABEL: define {{[^@]+}}@use_inline +; CHECK-LABEL: define void @use_inline ; CHECK-SAME: (ptr addrspace(200) nocapture [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: store i32 2, ptr addrspace(200) [[ARG]], align 4 ; CHECK-NEXT: ret void @@ -68,7 +68,7 @@ define signext i32 @stack_array() local_unnamed_addr addrspace(200) nounwind { ; ASM-NEXT: clc $c18, $zero, 64($c11) # 16-byte Folded Reload ; ASM-NEXT: cjr $c17 ; ASM-NEXT: cincoffset $c11, $c11, 80 -; CHECK-LABEL: define {{[^@]+}}@stack_array +; CHECK-LABEL: define signext i32 @stack_array ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: [[ARRAY:%.*]] = alloca [10 x i32], align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.lifetime.start.p200(i64 40, ptr addrspace(200) nonnull [[ARRAY]]) @@ -123,7 +123,7 @@ define signext i32 @stack_int() local_unnamed_addr addrspace(200) nounwind { ; ASM-NEXT: clc $c17, $zero, 16($c11) # 16-byte Folded Reload ; ASM-NEXT: cjr $c17 ; ASM-NEXT: cincoffset $c11, $c11, 32 -; CHECK-LABEL: define {{[^@]+}}@stack_int +; CHECK-LABEL: define signext i32 @stack_int ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.lifetime.start.p200(i64 4, ptr addrspace(200) nonnull [[VALUE]]) @@ -171,7 +171,7 @@ define signext i32 @stack_int_inlined() local_unnamed_addr addrspace(200) nounwi ; ASM-NEXT: clw $2, $zero, 12($c11) ; ASM-NEXT: cjr $c17 ; ASM-NEXT: cincoffset $c11, $c11, 16 -; CHECK-LABEL: define {{[^@]+}}@stack_int_inlined +; CHECK-LABEL: define signext i32 @stack_int_inlined ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.lifetime.start.p200(i64 4, ptr addrspace(200) nonnull [[VALUE]]) @@ -218,7 +218,7 @@ define signext i32 @out_of_bounds_setbounds() local_unnamed_addr addrspace(200) ; ASM-NEXT: clw $2, $zero, 12($c11) ; ASM-NEXT: cjr $c17 ; ASM-NEXT: cincoffset $c11, $c11, 16 -; CHECK-LABEL: define {{[^@]+}}@out_of_bounds_setbounds +; CHECK-LABEL: define signext i32 @out_of_bounds_setbounds ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: [[TMP1:%.*]] = call ptr addrspace(200) @llvm.cheri.bounded.stack.cap.i64(ptr addrspace(200) [[VALUE]], i64 4) @@ -264,7 +264,7 @@ define signext i32 @setbounds_escapes() local_unnamed_addr addrspace(200) nounwi ; ASM-NEXT: clc $c17, $zero, 16($c11) # 16-byte Folded Reload ; ASM-NEXT: cjr $c17 ; ASM-NEXT: cincoffset $c11, $c11, 32 -; CHECK-LABEL: define {{[^@]+}}@setbounds_escapes +; CHECK-LABEL: define signext i32 @setbounds_escapes ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; 
CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: [[ADDRESS_WITH_BOUNDS:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull [[VALUE]], i64 4) @@ -300,7 +300,7 @@ define void @assume_aligned() local_unnamed_addr addrspace(200) nounwind { ; ASM-NEXT: csw $1, $zero, 12($c11) ; ASM-NEXT: cjr $c17 ; ASM-NEXT: cincoffset $c11, $c11, 16 -; CHECK-LABEL: define {{[^@]+}}@assume_aligned +; CHECK-LABEL: define void @assume_aligned ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = alloca [4 x i8], align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr addrspace(200) [[TMP1]], i64 4) ] diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/trunc-load.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/trunc-load.ll index fdf8251231105..7a92c982e48b7 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/trunc-load.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/trunc-load.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/trunc-load.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - < %s | FileCheck %s --check-prefix=PURECAP ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 -o - < %s | FileCheck %s --check-prefix=HYBRID diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/unaligned-loads-stores-hybrid.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/unaligned-loads-stores-hybrid.ll index 62b2a7293706a..6cb876167c59a 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/unaligned-loads-stores-hybrid.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/unaligned-loads-stores-hybrid.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/unaligned-loads-stores-hybrid.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 %s -o - | FileCheck %s diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/unaligned-loads-stores-purecap.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/unaligned-loads-stores-purecap.ll index 81e5d4abe5830..6c7823aa53353 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/unaligned-loads-stores-purecap.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/unaligned-loads-stores-purecap.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/unaligned-loads-stores-purecap.ll ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - | FileCheck %s diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-rmw-cap-ptr-arg.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-rmw-cap-ptr-arg.ll index 013d756034b51..d73508ec61c2f 100644 --- 
a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-rmw-cap-ptr-arg.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-rmw-cap-ptr-arg.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/atomic-rmw-cap-ptr-arg.ll ; Check that we can generate sensible code for atomic operations using capability pointers on capabilities ; See https://github.com/CTSRD-CHERI/llvm-project/issues/470 diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/bounded-allocas-lifetimes.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/bounded-allocas-lifetimes.ll index c02fa3190d14a..9edc4d128277d 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/bounded-allocas-lifetimes.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/bounded-allocas-lifetimes.ll @@ -1,5 +1,6 @@ -; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/bounded-allocas-lifetimes.ll +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; CHERI-GENERIC-UTC: mir ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f %s -o - --stop-after=finalize-isel | FileCheck %s diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cap-from-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cap-from-ptr.ll index 979c816cc3260..09e1fc3c5f0cc 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cap-from-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cap-from-ptr.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cap-from-ptr.ll ;; Check that we can correctly generate code for llvm.cheri.cap.from.pointer() ;; This previously asserted on RISC-V due to a broken ISel pattern. 
@@ -7,17 +7,29 @@ ; RUN: opt -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -passes=instcombine -S < %s | llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f | FileCheck %s --check-prefix=PURECAP ; RUN: opt -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -passes=instcombine -S < %s | llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f | FileCheck %s --check-prefix=HYBRID -define internal i8 addrspace(200)* @test(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %cap, i32 %offset) addrspace(200) nounwind { +define internal ptr addrspace(200) @test(ptr addrspace(200) %ptr, ptr addrspace(200) %cap, i32 %offset) addrspace(200) nounwind { ; PURECAP-LABEL: test: ; PURECAP: # %bb.0: # %entry -; PURECAP-NEXT: cfromptr ca1, ca1, a2 +; PURECAP-NEXT: bnez a2, .LBB0_2 +; PURECAP-NEXT: # %bb.1: # %entry +; PURECAP-NEXT: cmove ca1, cnull +; PURECAP-NEXT: j .LBB0_3 +; PURECAP-NEXT: .LBB0_2: +; PURECAP-NEXT: csetaddr ca1, ca1, a2 +; PURECAP-NEXT: .LBB0_3: # %entry ; PURECAP-NEXT: csc ca1, 0(ca0) ; PURECAP-NEXT: cmove ca0, ca1 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: test: ; HYBRID: # %bb.0: # %entry -; HYBRID-NEXT: cfromptr ca1, ca1, a2 +; HYBRID-NEXT: bnez a2, .LBB0_2 +; HYBRID-NEXT: # %bb.1: # %entry +; HYBRID-NEXT: cmove ca1, cnull +; HYBRID-NEXT: j .LBB0_3 +; HYBRID-NEXT: .LBB0_2: +; HYBRID-NEXT: csetaddr ca1, ca1, a2 +; HYBRID-NEXT: .LBB0_3: # %entry ; HYBRID-NEXT: sc.cap ca1, (ca0) ; HYBRID-NEXT: cmove ca0, ca1 ; HYBRID-NEXT: ret @@ -29,13 +41,13 @@ define internal i8 addrspace(200)* @test(i8 addrspace(200)* addrspace(200)* %ptr ; CHECK-IR-NEXT: ret ptr addrspace(200) [[NEW]] ; entry: - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i32(i8 addrspace(200)* %cap, i32 %offset) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i32(ptr addrspace(200) %cap, i32 %offset) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; (int_cheri_cap_from_ptr x, 0) -> null -define internal i8 addrspace(200)* @cap_from_ptr_zero(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %cap) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_zero(ptr addrspace(200) %ptr, ptr addrspace(200) %cap) addrspace(200) nounwind { ; PURECAP-LABEL: cap_from_ptr_zero: ; PURECAP: # %bb.0: # %entry ; PURECAP-NEXT: csc cnull, 0(ca0) @@ -47,30 +59,44 @@ define internal i8 addrspace(200)* @cap_from_ptr_zero(i8 addrspace(200)* addrspa ; HYBRID-NEXT: sc.cap cnull, (ca0) ; HYBRID-NEXT: cmove ca0, cnull ; HYBRID-NEXT: ret -; CHECK-IR-LABEL: define {{[^@]+}}@cap_from_ptr_zero +; CHECK-IR-LABEL: define internal ptr addrspace(200) @cap_from_ptr_zero ; CHECK-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], ptr addrspace(200) [[CAP:%.*]]) #[[ATTR0]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: store ptr addrspace(200) null, ptr addrspace(200) [[PTR]], align 16 ; CHECK-IR-NEXT: ret ptr addrspace(200) null ; entry: - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i32(i8 addrspace(200)* %cap, i32 0) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i32(ptr addrspace(200) %cap, i32 0) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr 
addrspace(200) %new } ;; Check that (int_cheri_cap_from_ptr ddc, x) can use the DDC register directly -define internal i8 addrspace(200)* @cap_from_ptr_ddc(i8 addrspace(200)* addrspace(200)* %ptr, i32 %offset) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_ddc(ptr addrspace(200) %ptr, i32 %offset) addrspace(200) nounwind { ; PURECAP-LABEL: cap_from_ptr_ddc: ; PURECAP: # %bb.0: # %entry -; PURECAP-NEXT: cfromptr ca1, ddc, a1 +; PURECAP-NEXT: cspecialr ca2, ddc +; PURECAP-NEXT: bnez a1, .LBB2_2 +; PURECAP-NEXT: # %bb.1: # %entry +; PURECAP-NEXT: cmove ca1, cnull +; PURECAP-NEXT: j .LBB2_3 +; PURECAP-NEXT: .LBB2_2: +; PURECAP-NEXT: csetaddr ca1, ca2, a1 +; PURECAP-NEXT: .LBB2_3: # %entry ; PURECAP-NEXT: csc ca1, 0(ca0) ; PURECAP-NEXT: cmove ca0, ca1 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: cap_from_ptr_ddc: ; HYBRID: # %bb.0: # %entry -; HYBRID-NEXT: cfromptr ca1, ddc, a1 +; HYBRID-NEXT: cspecialr ca2, ddc +; HYBRID-NEXT: bnez a1, .LBB2_2 +; HYBRID-NEXT: # %bb.1: # %entry +; HYBRID-NEXT: cmove ca1, cnull +; HYBRID-NEXT: j .LBB2_3 +; HYBRID-NEXT: .LBB2_2: +; HYBRID-NEXT: csetaddr ca1, ca2, a1 +; HYBRID-NEXT: .LBB2_3: # %entry ; HYBRID-NEXT: sc.cap ca1, (ca0) ; HYBRID-NEXT: cmove ca0, ca1 ; HYBRID-NEXT: ret @@ -83,14 +109,14 @@ define internal i8 addrspace(200)* @cap_from_ptr_ddc(i8 addrspace(200)* addrspac ; CHECK-IR-NEXT: ret ptr addrspace(200) [[NEW]] ; entry: - %ddc = call i8 addrspace(200)* @llvm.cheri.ddc.get() - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i32(i8 addrspace(200)* %ddc, i32 %offset) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %ddc = call ptr addrspace(200) @llvm.cheri.ddc.get() + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i32(ptr addrspace(200) %ddc, i32 %offset) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; Check that (int_cheri_cap_from_ptr x, 0) -> null has priority over direct DDC usage -define internal i8 addrspace(200)* @cap_from_ptr_ddc_zero(i8 addrspace(200)* addrspace(200)* %ptr) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_ddc_zero(ptr addrspace(200) %ptr) addrspace(200) nounwind { ; PURECAP-LABEL: cap_from_ptr_ddc_zero: ; PURECAP: # %bb.0: # %entry ; PURECAP-NEXT: csc cnull, 0(ca0) @@ -109,26 +135,36 @@ define internal i8 addrspace(200)* @cap_from_ptr_ddc_zero(i8 addrspace(200)* add ; CHECK-IR-NEXT: ret ptr addrspace(200) null ; entry: - %ddc = call i8 addrspace(200)* @llvm.cheri.ddc.get() - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i32(i8 addrspace(200)* %ddc, i32 0) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %ddc = call ptr addrspace(200) @llvm.cheri.ddc.get() + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i32(ptr addrspace(200) %ddc, i32 0) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; Check that (int_cheri_cap_from_ptr null, x) does not use register zero (since that is DDC) -define internal i8 addrspace(200)* @cap_from_ptr_null(i8 addrspace(200)* addrspace(200)* %ptr, i32 %offset) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_null(ptr addrspace(200) %ptr, i32 %offset) addrspace(200) nounwind { ; PURECAP-LABEL: cap_from_ptr_null: ; PURECAP: # %bb.0: # %entry -; PURECAP-NEXT: cmove ca2, cnull -; PURECAP-NEXT: cfromptr ca1, ca2, a1 +; PURECAP-NEXT: bnez a1, 
.LBB4_2 +; PURECAP-NEXT: # %bb.1: # %entry +; PURECAP-NEXT: cmove ca1, cnull +; PURECAP-NEXT: j .LBB4_3 +; PURECAP-NEXT: .LBB4_2: +; PURECAP-NEXT: csetaddr ca1, cnull, a1 +; PURECAP-NEXT: .LBB4_3: # %entry ; PURECAP-NEXT: csc ca1, 0(ca0) ; PURECAP-NEXT: cmove ca0, ca1 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: cap_from_ptr_null: ; HYBRID: # %bb.0: # %entry -; HYBRID-NEXT: cmove ca2, cnull -; HYBRID-NEXT: cfromptr ca1, ca2, a1 +; HYBRID-NEXT: bnez a1, .LBB4_2 +; HYBRID-NEXT: # %bb.1: # %entry +; HYBRID-NEXT: cmove ca1, cnull +; HYBRID-NEXT: j .LBB4_3 +; HYBRID-NEXT: .LBB4_2: +; HYBRID-NEXT: csetaddr ca1, cnull, a1 +; HYBRID-NEXT: .LBB4_3: # %entry ; HYBRID-NEXT: sc.cap ca1, (ca0) ; HYBRID-NEXT: cmove ca0, ca1 ; HYBRID-NEXT: ret @@ -140,10 +176,10 @@ define internal i8 addrspace(200)* @cap_from_ptr_null(i8 addrspace(200)* addrspa ; CHECK-IR-NEXT: ret ptr addrspace(200) [[NEW]] ; entry: - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i32(i8 addrspace(200)* null, i32 %offset) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i32(ptr addrspace(200) null, i32 %offset) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } -declare i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i32(i8 addrspace(200)*, i32) -declare i8 addrspace(200)* @llvm.cheri.ddc.get() +declare ptr addrspace(200) @llvm.cheri.cap.from.pointer.i32(ptr addrspace(200), i32) +declare ptr addrspace(200) @llvm.cheri.ddc.get() diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-csub.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-csub.ll index aa436654b75d4..cb9c2ad24a1c8 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-csub.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-csub.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-csub.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f %s -o - | FileCheck %s --check-prefix=HYBRID ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f %s -o - | FileCheck %s --check-prefix=PURECAP diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-intrinsics-folding-broken-module-regression.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-intrinsics-folding-broken-module-regression.ll index 250609215e643..f3db990f090e9 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-intrinsics-folding-broken-module-regression.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-intrinsics-folding-broken-module-regression.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-intrinsics-folding-broken-module-regression.ll ; This used to create a broken function. 
; FIXME: the getoffset+add sequence should be folded to an increment diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-memfn-call.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-memfn-call.ll index 0988e06605568..bd889a8f9400e 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-memfn-call.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-memfn-call.ll @@ -1,13 +1,13 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-memfn-call.ll ; Check that we call memset_c/memmove_c/memcpy_c in hybrid mode. ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f %s -o - | FileCheck %s --check-prefix=PURECAP ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f %s -o - | FileCheck %s --check-prefix=HYBRID %struct.x = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } -declare void @llvm.memmove.p200i8.p200i8.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) -declare void @llvm.memset.p200i8.i64(ptr addrspace(200) nocapture, i8, i64, i1) -declare void @llvm.memcpy.p200i8.p200i8.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) +declare void @llvm.memmove.p200.p200.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) +declare void @llvm.memset.p200.i64(ptr addrspace(200) nocapture, i8, i64, i1) +declare void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) define void @call_memset(ptr addrspace(200) align 4 %dst) nounwind { ; PURECAP-LABEL: call_memset: @@ -34,7 +34,7 @@ define void @call_memset(ptr addrspace(200) align 4 %dst) nounwind { ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret entry: - call void @llvm.memset.p200i8.i64(ptr addrspace(200) align 4 %dst, i8 0, i64 40, i1 false) + call void @llvm.memset.p200.i64(ptr addrspace(200) align 4 %dst, i8 0, i64 40, i1 false) ret void } @@ -61,7 +61,7 @@ define void @call_memcpy(ptr addrspace(200) align 4 %dst, ptr addrspace(200) ali ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret entry: - call void @llvm.memcpy.p200i8.p200i8.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) + call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) ret void } @@ -88,7 +88,7 @@ define void @call_memmove(ptr addrspace(200) align 4 %dst, ptr addrspace(200) al ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret entry: - call void @llvm.memmove.p200i8.p200i8.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) + call void @llvm.memmove.p200.p200.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) ret void } diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-pointer-comparison.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-pointer-comparison.ll index 5a452c492e2ea..7e2ba46e5dcc3 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-pointer-comparison.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cheri-pointer-comparison.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; NOTE: Assertions have been autogenerated by 
utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-pointer-comparison.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f %s -o - | FileCheck %s --check-prefix=HYBRID ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f %s -o - | FileCheck %s --check-prefix=PURECAP diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/dagcombine-ptradd-deleted-regression.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/dagcombine-ptradd-deleted-regression.ll index 0a1ed19197c71..755d4c029456c 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/dagcombine-ptradd-deleted-regression.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/dagcombine-ptradd-deleted-regression.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/dagcombine-ptradd-deleted-regression.ll ; This would previously crash DAGCombiner::visitPTRADD since the PTRADD ; corresponding to the second GEP would be collapsed to a no-op when diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/function-alias-size.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/function-alias-size.ll index 16fd1d90dbb00..9253e4da149fa 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/function-alias-size.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/function-alias-size.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/function-alias-size.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f %s -o - < %s | FileCheck %s --check-prefix=ASM ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f %s -o - -filetype=obj < %s | llvm-objdump --syms -r - | FileCheck %s --check-prefix=OBJDUMP diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/gvn-capability-store-to-load-fwd.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/gvn-capability-store-to-load-fwd.ll index a15f6644514f2..8b1e3e464bda0 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/gvn-capability-store-to-load-fwd.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/gvn-capability-store-to-load-fwd.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/gvn-capability-store-to-load-fwd.ll ; Check that GVN does not attempt to read capability fields that it can't get the bits for ; This is https://github.com/CTSRD-CHERI/llvm-project/issues/385 @@ -18,11 +18,10 @@ target datalayout = "e-m:e-pf200:64:64:64:32-p:32:32-i64:64-n32-S128-A200-P200-G200" -%0 = type { i8, i8, [14 x i8] } -%struct.addrinfo = type { i32, i32, i32, i32, i32, i8 addrspace(200)*, %0 addrspace(200)*, %struct.addrinfo addrspace(200)* } +%struct.addrinfo = type 
{ i32, i32, i32, i32, i32, ptr addrspace(200), ptr addrspace(200), ptr addrspace(200) } -define i32 @first_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @first_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: first_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset csp, csp, -48 @@ -30,7 +29,7 @@ define i32 @first_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; ASM-NEXT: clw a0, 0(csp) ; ASM-NEXT: cincoffset csp, csp, 48 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@first_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @first_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 8, addrspace(200) ; CHECK-NEXT: store ptr addrspace(200) [[ARG]], ptr addrspace(200) [[STACKVAL]], align 8 @@ -38,14 +37,13 @@ define i32 @first_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; CHECK-NEXT: ret i32 [[RESULT]] ; %stackval = alloca %struct.addrinfo, align 8, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 0 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 8 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 0 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 8 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } -define i32 @second_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @second_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: second_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset csp, csp, -48 @@ -53,7 +51,7 @@ define i32 @second_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; ASM-NEXT: clw a0, 4(csp) ; ASM-NEXT: cincoffset csp, csp, 48 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@second_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @second_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 8, addrspace(200) ; CHECK-NEXT: [[FIELD:%.*]] = getelementptr inbounds [[STRUCT_ADDRINFO]], ptr addrspace(200) [[STACKVAL]], i64 0, i32 1 @@ -62,14 +60,13 @@ define i32 @second_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; CHECK-NEXT: ret i32 [[RESULT]] ; %stackval = alloca %struct.addrinfo, align 8, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 1 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 8 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 1 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 8 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } -define i32 @third_i32_store_to_load_fwd(i8 addrspace(200)* %arg) 
local_unnamed_addr addrspace(200) nounwind { +define i32 @third_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: third_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset csp, csp, -48 @@ -77,7 +74,7 @@ define i32 @third_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; ASM-NEXT: # implicit-def: $x10 ; ASM-NEXT: cincoffset csp, csp, 48 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@third_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @third_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 8, addrspace(200) ; CHECK-NEXT: [[FIELD:%.*]] = getelementptr inbounds [[STRUCT_ADDRINFO]], ptr addrspace(200) [[STACKVAL]], i64 0, i32 2 @@ -85,14 +82,13 @@ define i32 @third_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; CHECK-NEXT: ret i32 undef ; %stackval = alloca %struct.addrinfo, align 8, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 2 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 8 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 2 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 8 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } -define i32 @fourth_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @fourth_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: fourth_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset csp, csp, -48 @@ -100,7 +96,7 @@ define i32 @fourth_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; ASM-NEXT: # implicit-def: $x10 ; ASM-NEXT: cincoffset csp, csp, 48 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@fourth_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @fourth_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 8, addrspace(200) ; CHECK-NEXT: [[FIELD:%.*]] = getelementptr inbounds [[STRUCT_ADDRINFO]], ptr addrspace(200) [[STACKVAL]], i64 0, i32 3 @@ -108,9 +104,8 @@ define i32 @fourth_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; CHECK-NEXT: ret i32 undef ; %stackval = alloca %struct.addrinfo, align 8, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 3 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 8 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 3 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 8 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/hoist-alloca.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/hoist-alloca.ll index d262844484b5a..3c7e370dfca94 100644 --- 
a/llvm/test/CodeGen/CHERI-Generic/RISCV32/hoist-alloca.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/hoist-alloca.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/hoist-alloca.ll ; REQUIRES: asserts ; Check that we can hoist the csetbounds for a local alloca outside of loops diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/intrinsics-purecap-only.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/intrinsics-purecap-only.ll index be7392e25298d..966c57697072a 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/intrinsics-purecap-only.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/intrinsics-purecap-only.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/intrinsics-purecap-only.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f < %s -o - | FileCheck %s --check-prefix=PURECAP ; RUN: not --crash llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f < %s -o - 2>&1 | FileCheck %s --check-prefix HYBRID-ERROR diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/intrinsics.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/intrinsics.ll index cab52c0ccc526..15475227394c8 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/intrinsics.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/intrinsics.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/intrinsics.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f %s -o - < %s | FileCheck %s --check-prefix=PURECAP ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -o - < %s | FileCheck %s --check-prefix=HYBRID @@ -416,12 +416,16 @@ declare i8 addrspace(200)* @llvm.cheri.pcc.get() define i32 @to_pointer(i8 addrspace(200)* %cap1, i8 addrspace(200)* %cap2) nounwind { ; PURECAP-LABEL: to_pointer: ; PURECAP: # %bb.0: -; PURECAP-NEXT: ctoptr a0, ca0, ca1 +; PURECAP-NEXT: cgettag a0, ca1 +; PURECAP-NEXT: neg a0, a0 +; PURECAP-NEXT: and a0, a1, a0 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: to_pointer: ; HYBRID: # %bb.0: -; HYBRID-NEXT: ctoptr a0, ca0, ca1 +; HYBRID-NEXT: cgettag a0, ca1 +; HYBRID-NEXT: neg a0, a0 +; HYBRID-NEXT: and a0, a1, a0 ; HYBRID-NEXT: ret %ptr = call i32 @llvm.cheri.cap.to.pointer(i8 addrspace(200)* %cap1, i8 addrspace(200)* %cap2) ret i32 %ptr @@ -430,12 +434,16 @@ define i32 @to_pointer(i8 addrspace(200)* %cap1, i8 addrspace(200)* %cap2) nounw define i32 @to_pointer_ddc_relative(i8 addrspace(200)* %cap) nounwind { ; PURECAP-LABEL: to_pointer_ddc_relative: ; PURECAP: # %bb.0: -; PURECAP-NEXT: ctoptr a0, ca0, ddc +; PURECAP-NEXT: cgettag a1, ca0 +; PURECAP-NEXT: neg a1, a1 +; PURECAP-NEXT: and a0, a0, a1 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: to_pointer_ddc_relative: ; 
HYBRID: # %bb.0: -; HYBRID-NEXT: ctoptr a0, ca0, ddc +; HYBRID-NEXT: cgettag a1, ca0 +; HYBRID-NEXT: neg a1, a1 +; HYBRID-NEXT: and a0, a0, a1 ; HYBRID-NEXT: ret %ddc = call i8 addrspace(200)* @llvm.cheri.ddc.get() %ptr = call i32 @llvm.cheri.cap.to.pointer(i8 addrspace(200)* %ddc, i8 addrspace(200)* %cap) @@ -445,12 +453,22 @@ define i32 @to_pointer_ddc_relative(i8 addrspace(200)* %cap) nounwind { define i8 addrspace(200)* @from_pointer(i8 addrspace(200)* %cap, i32 %ptr) nounwind { ; PURECAP-LABEL: from_pointer: ; PURECAP: # %bb.0: -; PURECAP-NEXT: cfromptr ca0, ca0, a1 +; PURECAP-NEXT: bnez a1, .LBB28_2 +; PURECAP-NEXT: # %bb.1: +; PURECAP-NEXT: cmove ca0, cnull +; PURECAP-NEXT: cret +; PURECAP-NEXT: .LBB28_2: +; PURECAP-NEXT: csetaddr ca0, ca0, a1 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: from_pointer: ; HYBRID: # %bb.0: -; HYBRID-NEXT: cfromptr ca0, ca0, a1 +; HYBRID-NEXT: bnez a1, .LBB28_2 +; HYBRID-NEXT: # %bb.1: +; HYBRID-NEXT: cmove ca0, cnull +; HYBRID-NEXT: ret +; HYBRID-NEXT: .LBB28_2: +; HYBRID-NEXT: csetaddr ca0, ca0, a1 ; HYBRID-NEXT: ret %newcap = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer(i8 addrspace(200)* %cap, i32 %ptr) ret i8 addrspace(200)* %newcap @@ -459,12 +477,24 @@ define i8 addrspace(200)* @from_pointer(i8 addrspace(200)* %cap, i32 %ptr) nounw define i8 addrspace(200)* @from_ddc(i32 %ptr) nounwind { ; PURECAP-LABEL: from_ddc: ; PURECAP: # %bb.0: -; PURECAP-NEXT: cfromptr ca0, ddc, a0 +; PURECAP-NEXT: cspecialr ca1, ddc +; PURECAP-NEXT: bnez a0, .LBB29_2 +; PURECAP-NEXT: # %bb.1: +; PURECAP-NEXT: cmove ca0, cnull +; PURECAP-NEXT: cret +; PURECAP-NEXT: .LBB29_2: +; PURECAP-NEXT: csetaddr ca0, ca1, a0 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: from_ddc: ; HYBRID: # %bb.0: -; HYBRID-NEXT: cfromptr ca0, ddc, a0 +; HYBRID-NEXT: cspecialr ca1, ddc +; HYBRID-NEXT: bnez a0, .LBB29_2 +; HYBRID-NEXT: # %bb.1: +; HYBRID-NEXT: cmove ca0, cnull +; HYBRID-NEXT: ret +; HYBRID-NEXT: .LBB29_2: +; HYBRID-NEXT: csetaddr ca0, ca1, a0 ; HYBRID-NEXT: ret %ddc = call i8 addrspace(200)* @llvm.cheri.ddc.get() %cap = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer(i8 addrspace(200)* %ddc, i32 %ptr) diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/machinelicm-hoist-csetbounds.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/machinelicm-hoist-csetbounds.ll index 05cb8f03a1849..8a3416bbba208 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/machinelicm-hoist-csetbounds.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/machinelicm-hoist-csetbounds.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 3 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/machinelicm-hoist-csetbounds.ll ; Previously LLVM would hoist CSetBounds instructions out of if conditions/loops ; even if the source pointer could be NULL. 
On MIPS and RISC-V this results in a @@ -38,22 +38,25 @@ define dso_local void @hoist_csetbounds(i32 signext %cond, ptr addrspace(200) %f ; CHECK-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill ; CHECK-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill ; CHECK-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill +; CHECK-NEXT: csc cs4, 0(csp) # 8-byte Folded Spill ; CHECK-NEXT: cmove cs0, ca1 -; CHECK-NEXT: cincoffset cs1, ca1, 4 -; CHECK-NEXT: li s2, -1 -; CHECK-NEXT: li s3, 99 +; CHECK-NEXT: cincoffset ca0, ca1, 4 +; CHECK-NEXT: li s3, -1 +; CHECK-NEXT: li s4, 99 +; CHECK-NEXT: csetbounds cs2, ca1, 4 +; CHECK-NEXT: csetbounds cs1, ca0, 4 ; CHECK-NEXT: j .LBB0_2 ; CHECK-NEXT: .LBB0_1: # %for.inc ; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: addi s2, s2, 1 -; CHECK-NEXT: bgeu s2, s3, .LBB0_4 +; CHECK-NEXT: addi s3, s3, 1 +; CHECK-NEXT: bgeu s3, s4, .LBB0_4 ; CHECK-NEXT: .LBB0_2: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: beqz s0, .LBB0_1 ; CHECK-NEXT: # %bb.3: # %if.then ; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: csetbounds ca0, cs0, 4 -; CHECK-NEXT: csetbounds ca1, cs1, 4 +; CHECK-NEXT: cmove ca0, cs2 +; CHECK-NEXT: cmove ca1, cs1 ; CHECK-NEXT: ccall call ; CHECK-NEXT: j .LBB0_1 ; CHECK-NEXT: .LBB0_4: # %for.cond.cleanup @@ -62,6 +65,7 @@ define dso_local void @hoist_csetbounds(i32 signext %cond, ptr addrspace(200) %f ; CHECK-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload ; CHECK-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload ; CHECK-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload +; CHECK-NEXT: clc cs4, 0(csp) # 8-byte Folded Reload ; CHECK-NEXT: cincoffset csp, csp, 48 ; CHECK-NEXT: cret ; HOIST-OPT-LABEL: define dso_local void @hoist_csetbounds @@ -71,23 +75,21 @@ define dso_local void @hoist_csetbounds(i32 signext %cond, ptr addrspace(200) %f ; HOIST-OPT-NEXT: br i1 [[TOBOOL]], label [[FOR_COND_CLEANUP:%.*]], label [[ENTRY_SPLIT:%.*]] ; HOIST-OPT: entry.split: ; HOIST-OPT-NEXT: [[DST:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(200) [[F]], i32 4 -; HOIST-OPT-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i32(ptr addrspace(200) nonnull [[F]], i32 4) -; HOIST-OPT-NEXT: [[TMP1:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i32(ptr addrspace(200) nonnull [[DST]], i32 4) +; HOIST-OPT-NEXT: [[ADDRESS_WITH_BOUNDS:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i32(ptr addrspace(200) nonnull [[F]], i32 4) +; HOIST-OPT-NEXT: [[ADDRESS_WITH_BOUNDS1:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i32(ptr addrspace(200) nonnull [[DST]], i32 4) ; HOIST-OPT-NEXT: br label [[FOR_BODY:%.*]] ; HOIST-OPT: for.cond.cleanup: ; HOIST-OPT-NEXT: ret void ; HOIST-OPT: for.body: ; HOIST-OPT-NEXT: [[I_06:%.*]] = phi i32 [ 0, [[ENTRY_SPLIT]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; HOIST-OPT-NEXT: tail call void @call(ptr addrspace(200) [[TMP0]], ptr addrspace(200) [[TMP1]]) +; HOIST-OPT-NEXT: tail call void @call(ptr addrspace(200) [[ADDRESS_WITH_BOUNDS]], ptr addrspace(200) [[ADDRESS_WITH_BOUNDS1]]) ; HOIST-OPT-NEXT: [[INC]] = add nuw nsw i32 [[I_06]], 1 ; HOIST-OPT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], 100 ; HOIST-OPT-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: %tobool = icmp eq ptr addrspace(200) %f, null - %0 = bitcast ptr addrspace(200) %f to ptr addrspace(200) %dst = getelementptr inbounds %struct.foo, ptr addrspace(200) %f, i64 0, i32 1 - %1 = bitcast ptr addrspace(200) %dst to ptr addrspace(200) br label %for.body 
for.cond.cleanup: ; preds = %for.inc @@ -98,10 +100,8 @@ for.body: ; preds = %for.inc, %entry br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %2 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i32(ptr addrspace(200) nonnull %0, i32 4) - %address.with.bounds = bitcast ptr addrspace(200) %2 to ptr addrspace(200) - %3 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i32(ptr addrspace(200) nonnull %1, i32 4) - %address.with.bounds1 = bitcast ptr addrspace(200) %3 to ptr addrspace(200) + %address.with.bounds = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i32(ptr addrspace(200) nonnull %f, i32 4) + %address.with.bounds1 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i32(ptr addrspace(200) nonnull %dst, i32 4) call void @call(ptr addrspace(200) %address.with.bounds, ptr addrspace(200) %address.with.bounds1) br label %for.inc diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-from-constant.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-from-constant.ll index 323d1e2fd94d7..3c3b6520ae129 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-from-constant.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-from-constant.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-from-constant.ll ;; Copying from a zero constant can be converted to a memset (even with the tag preservation flags) ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f < %s -o - | FileCheck %s diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-no-preserve-tags-attr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-no-preserve-tags-attr.ll index 2e4a0ad52d4b1..de4c319a5e30e 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-no-preserve-tags-attr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-no-preserve-tags-attr.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-no-preserve-tags-attr.ll ; Check that the no_preserve_tags annotation on memcpy/memmove intrinsics allows ; use to inline struct copies >= capability size. 
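The memcpy tests in this part of the diff (memcpy-from-constant.ll and memcpy-no-preserve-tags-attr.ll above, memcpy-preserve-tags-size-not-multiple.ll and memcpy-zeroinit.ll below) all exercise the same decision: a copy through ptr addrspace(200) memory may only be expanded inline as plain integer stores when capability tags are known not to need preserving. The sketch below is not part of the patch; it illustrates the zero-constant case with invented names (@zeros, @copy_from_zero_constant), while the llvm.memcpy.p200.p200.i32 signature and the RV32 purecap data layout are the ones that already appear verbatim elsewhere in this diff.

target datalayout = "e-m:e-pf200:64:64:64:32-p:32:32-i64:64-n32-S128-A200-P200-G200"

@zeros = addrspace(200) constant [32 x i8] zeroinitializer

define void @copy_from_zero_constant(ptr addrspace(200) %dst) addrspace(200) nounwind {
  ; An all-zero source can never hand out a valid capability tag, so even a
  ; copy that would otherwise have to preserve tags can be lowered to a
  ; memset-style run of zero stores instead of a runtime memcpy() call.
  call void @llvm.memcpy.p200.p200.i32(ptr addrspace(200) align 16 %dst,
                                       ptr addrspace(200) align 16 @zeros,
                                       i32 32, i1 false)
  ret void
}

declare void @llvm.memcpy.p200.p200.i32(ptr addrspace(200), ptr addrspace(200), i32, i1)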
diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-preserve-tags-size-not-multiple.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-preserve-tags-size-not-multiple.ll index 7bc355b16c296..f7a75ef9d1152 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-preserve-tags-size-not-multiple.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-preserve-tags-size-not-multiple.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-preserve-tags-size-not-multiple.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -o - -O0 -verify-machineinstrs %s | FileCheck %s -check-prefixes CHECK ; Check that we can inline memmove/memcpy despite having the must_preserve_cheri_tags property and the size not diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-zeroinit.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-zeroinit.ll index 085cd89a6a3c5..382655b6ed467 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-zeroinit.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/memcpy-zeroinit.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-zeroinit.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f < %s -o - | FileCheck %s ; Check that the copy from the zeroinitializer global is turned into a series of zero stores diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/optsize-preserve-tags-memcpy-crash.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/optsize-preserve-tags-memcpy-crash.ll index bd5404c0411f0..87421de9676b3 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/optsize-preserve-tags-memcpy-crash.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/optsize-preserve-tags-memcpy-crash.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/optsize-preserve-tags-memcpy-crash.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f < %s -o - | FileCheck %s ; The following code copying 31 bytes (with capability alignment) using the diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/ptrtoint.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/ptrtoint.ll index 223d569b12fad..fd7aede769115 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/ptrtoint.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/ptrtoint.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/ptrtoint.ll ;; Check that we can correctly generate code for 
ptrtoint and perform simple folds ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f < %s | FileCheck %s @@ -12,7 +12,9 @@ define internal i32 @ptrtoint(i8 addrspace(200)* %cap) addrspace(200) nounwind { ; ; HYBRID-LABEL: ptrtoint: ; HYBRID: # %bb.0: -; HYBRID-NEXT: ctoptr a0, ca0, ddc +; HYBRID-NEXT: cgettag a1, ca0 +; HYBRID-NEXT: neg a1, a1 +; HYBRID-NEXT: and a0, a0, a1 ; HYBRID-NEXT: ret %ret = ptrtoint i8 addrspace(200)* %cap to i32 ret i32 %ret @@ -26,7 +28,9 @@ define internal i32 @ptrtoint_plus_const(i8 addrspace(200)* %cap) addrspace(200) ; ; HYBRID-LABEL: ptrtoint_plus_const: ; HYBRID: # %bb.0: -; HYBRID-NEXT: ctoptr a0, ca0, ddc +; HYBRID-NEXT: cgettag a1, ca0 +; HYBRID-NEXT: neg a1, a1 +; HYBRID-NEXT: and a0, a0, a1 ; HYBRID-NEXT: addi a0, a0, 2 ; HYBRID-NEXT: ret %zero = ptrtoint i8 addrspace(200)* %cap to i32 @@ -42,7 +46,9 @@ define internal i32 @ptrtoint_plus_var(i8 addrspace(200)* %cap, i32 %add) addrsp ; ; HYBRID-LABEL: ptrtoint_plus_var: ; HYBRID: # %bb.0: -; HYBRID-NEXT: ctoptr a0, ca0, ddc +; HYBRID-NEXT: cgettag a2, ca0 +; HYBRID-NEXT: neg a2, a2 +; HYBRID-NEXT: and a0, a0, a2 ; HYBRID-NEXT: add a0, a0, a1 ; HYBRID-NEXT: ret %zero = ptrtoint i8 addrspace(200)* %cap to i32 @@ -58,7 +64,9 @@ define internal i32 @ptrtoint_null() addrspace(200) nounwind { ; ; HYBRID-LABEL: ptrtoint_null: ; HYBRID: # %bb.0: -; HYBRID-NEXT: li a0, 0 +; HYBRID-NEXT: cgettag a0, cnull +; HYBRID-NEXT: neg a0, a0 +; HYBRID-NEXT: and a0, zero, a0 ; HYBRID-NEXT: ret %ret = ptrtoint i8 addrspace(200)* null to i32 ret i32 %ret @@ -72,7 +80,10 @@ define internal i32 @ptrtoint_null_plus_const() addrspace(200) nounwind { ; ; HYBRID-LABEL: ptrtoint_null_plus_const: ; HYBRID: # %bb.0: -; HYBRID-NEXT: li a0, 2 +; HYBRID-NEXT: cgettag a0, cnull +; HYBRID-NEXT: neg a0, a0 +; HYBRID-NEXT: and a0, zero, a0 +; HYBRID-NEXT: addi a0, a0, 2 ; HYBRID-NEXT: ret %zero = ptrtoint i8 addrspace(200)* null to i32 %ret = add i32 %zero, 2 @@ -87,7 +98,10 @@ define internal i32 @ptrtoint_null_plus_var(i32 %add) addrspace(200) nounwind { ; ; HYBRID-LABEL: ptrtoint_null_plus_var: ; HYBRID: # %bb.0: -; HYBRID-NEXT: add a0, zero, a0 +; HYBRID-NEXT: cgettag a1, cnull +; HYBRID-NEXT: neg a1, a1 +; HYBRID-NEXT: and a1, zero, a1 +; HYBRID-NEXT: add a0, a1, a0 ; HYBRID-NEXT: ret %zero = ptrtoint i8 addrspace(200)* null to i32 %ret = add i32 %zero, %add diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/setoffset-multiple-uses.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/setoffset-multiple-uses.ll index fe227661e961c..b9a22c3b30d65 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/setoffset-multiple-uses.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/setoffset-multiple-uses.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 3 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/setoffset-multiple-uses.ll ; RUN: opt -S -passes=instcombine -o - %s | FileCheck %s ; RUN: opt -S -passes=instcombine -o - %s | llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -O1 - -o - | %cheri_FileCheck %s --check-prefix ASM diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-dynamic-alloca.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-dynamic-alloca.ll index 5a4d91f63f6ef..c156990216aff 100644 --- 
a/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-dynamic-alloca.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-dynamic-alloca.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-bounds-dynamic-alloca.ll ; RUN: opt -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -passes=cheri-bound-allocas -o - -S %s | FileCheck %s ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -O0 %s -o - | FileCheck %s -check-prefix ASM @@ -62,7 +62,7 @@ define i32 @alloca_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounwind ; ASM-OPT-NEXT: .LBB0_2: # %exit ; ASM-OPT-NEXT: li a0, 123 ; ASM-OPT-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@alloca_in_entry +; CHECK-LABEL: define i32 @alloca_in_entry ; CHECK-SAME: (i1 [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [16 x i8], align 16, addrspace(200) @@ -70,8 +70,7 @@ define i32 @alloca_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounwind ; CHECK: do_alloca: ; CHECK-NEXT: br label [[USE_ALLOCA_NO_BOUNDS:%.*]] ; CHECK: use_alloca_no_bounds: -; CHECK-NEXT: [[PTR:%.*]] = bitcast ptr addrspace(200) [[ALLOCA]] to ptr addrspace(200) -; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[PTR]], i64 1 +; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[ALLOCA]], i64 1 ; CHECK-NEXT: store i64 1234, ptr addrspace(200) [[PTR_PLUS_ONE]], align 8 ; CHECK-NEXT: br label [[USE_ALLOCA_NEED_BOUNDS:%.*]] ; CHECK: use_alloca_need_bounds: @@ -90,8 +89,7 @@ do_alloca: ; preds = %entry br label %use_alloca_no_bounds use_alloca_no_bounds: ; preds = %do_alloca - %ptr = bitcast ptr addrspace(200) %alloca to ptr addrspace(200) - %ptr_plus_one = getelementptr i64, ptr addrspace(200) %ptr, i64 1 + %ptr_plus_one = getelementptr i64, ptr addrspace(200) %alloca, i64 1 store i64 1234, ptr addrspace(200) %ptr_plus_one, align 8 br label %use_alloca_need_bounds @@ -170,7 +168,7 @@ define i32 @alloca_not_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounw ; ASM-OPT-NEXT: .LBB1_2: # %exit ; ASM-OPT-NEXT: li a0, 123 ; ASM-OPT-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@alloca_not_in_entry +; CHECK-LABEL: define i32 @alloca_not_in_entry ; CHECK-SAME: (i1 [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 [[ARG]], label [[DO_ALLOCA:%.*]], label [[EXIT:%.*]] @@ -179,8 +177,7 @@ define i32 @alloca_not_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounw ; CHECK-NEXT: [[TMP0:%.*]] = call ptr addrspace(200) @llvm.cheri.bounded.stack.cap.dynamic.i32(ptr addrspace(200) [[ALLOCA]], i32 16) ; CHECK-NEXT: br label [[USE_ALLOCA_NO_BOUNDS:%.*]] ; CHECK: use_alloca_no_bounds: -; CHECK-NEXT: [[PTR:%.*]] = bitcast ptr addrspace(200) [[ALLOCA]] to ptr addrspace(200) -; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[PTR]], i64 1 +; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[ALLOCA]], i64 1 ; CHECK-NEXT: store i64 1234, ptr addrspace(200) [[PTR_PLUS_ONE]], align 8 ; CHECK-NEXT: br label [[USE_ALLOCA_NEED_BOUNDS:%.*]] ; CHECK: use_alloca_need_bounds: @@ -198,8 +195,7 @@ 
do_alloca: ; preds = %entry br label %use_alloca_no_bounds use_alloca_no_bounds: ; preds = %do_alloca - %ptr = bitcast ptr addrspace(200) %alloca to ptr addrspace(200) - %ptr_plus_one = getelementptr i64, ptr addrspace(200) %ptr, i64 1 + %ptr_plus_one = getelementptr i64, ptr addrspace(200) %alloca, i64 1 store i64 1234, ptr addrspace(200) %ptr_plus_one, align 8 br label %use_alloca_need_bounds @@ -272,7 +268,7 @@ define i32 @crash_reproducer(i1 %arg) local_unnamed_addr addrspace(200) nounwind ; ASM-OPT-NEXT: cincoffset csp, csp, 16 ; ASM-OPT-NEXT: cret ; ASM-OPT-NEXT: .LBB2_2: # %entry.while.end_crit_edge -; CHECK-LABEL: define {{[^@]+}}@crash_reproducer +; CHECK-LABEL: define i32 @crash_reproducer ; CHECK-SAME: (i1 [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 [[ARG]], label [[ENTRY_WHILE_END_CRIT_EDGE:%.*]], label [[WHILE_BODY:%.*]] diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-opaque-spill-too-early.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-opaque-spill-too-early.ll index f397286c2e4e0..255fb77b53738 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-opaque-spill-too-early.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-opaque-spill-too-early.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-bounds-opaque-spill-too-early.ll ;; After merging to LLVM 15 the stack bounds pass the switch to opqaue pointers caused ;; miscompilations in the stack bounding pass (the unbounded value was used instead of @@ -48,7 +48,7 @@ define dso_local void @lazy_bind_args() addrspace(200) nounwind { ; ASM-NEXT: clc cra, 8(csp) # 8-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 16 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@lazy_bind_args +; CHECK-LABEL: define dso_local void @lazy_bind_args ; CHECK-SAME: () addrspace(200) #[[ATTR2:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CAP:%.*]] = alloca ptr addrspace(200), align 8, addrspace(200) diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-pass-phi.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-pass-phi.ll index 85f75316c00f0..c3972fa9fe5be 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-pass-phi.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-bounds-pass-phi.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-bounds-pass-phi.ll ; REQUIRES: asserts ; RUN: opt -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -passes=cheri-bound-allocas %s -o - -S -cheri-stack-bounds=if-needed \ @@ -48,7 +48,7 @@ define void @test_phi(i1 %cond) addrspace(200) nounwind { ; ASM-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 32 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@test_phi +; CHECK-LABEL: define void @test_phi ; CHECK-SAME: (i1 [[COND:%.*]]) addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ALLOCA1:%.*]] = alloca i32, align 4, 
addrspace(200) @@ -123,7 +123,7 @@ define void @test_only_created_in_predecessor_block(i1 %cond) addrspace(200) nou ; ASM-NEXT: clc cra, 8(csp) # 8-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 16 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@test_only_created_in_predecessor_block +; CHECK-LABEL: define void @test_only_created_in_predecessor_block ; CHECK-SAME: (i1 [[COND:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ALLOCA1:%.*]] = alloca i32, align 4, addrspace(200) diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-spill-unnecessary.c.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-spill-unnecessary.c.ll index 03743edb07c7c..70b13412cd5d5 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-spill-unnecessary.c.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/stack-spill-unnecessary.c.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-spill-unnecessary.c.ll ; The new CheriBoundedStackPseudo instruction lets us pretend that the incoffset+csetbounds ; is a single trivially rematerizable instruction so it can freely move it around to avoid stack spills. diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/stackframe-intrinsics.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/stackframe-intrinsics.ll index f3e2399aaf5de..1670ff55de111 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/stackframe-intrinsics.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/stackframe-intrinsics.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stackframe-intrinsics.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f %s -o - | FileCheck %s --check-prefix=PURECAP ; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f | FileCheck %s --check-prefix HYBRID diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/strcpy-to-memcpy-no-tags.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/strcpy-to-memcpy-no-tags.ll index d49330ad663bc..f18c8bba532bd 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/strcpy-to-memcpy-no-tags.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/strcpy-to-memcpy-no-tags.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/strcpy-to-memcpy-no-tags.ll ; Check that we can inline the loads/stores generated when simplifiying ; string libcalls to memcpy() (since it should be marked as non-tag-preserving). 
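The header comment above explains why strcpy-to-memcpy-no-tags.ll expects inline expansion: LLVM's libcall simplification rewrites string libcalls whose source is a constant literal into llvm.memcpy.p200.p200.i32, and a copy out of a string literal can never carry capability tags, so the resulting memcpy is non-tag-preserving and may be inlined. A minimal sketch of that input pattern follows; the contents of @str and the function name are invented for illustration (the real @str is defined elsewhere in the test), while the 17-byte length and the memcpy signature match the CHECK-IR lines in the hunks below.

target datalayout = "e-m:e-pf200:64:64:64:32-p:32:32-i64:64-n32-S128-A200-P200-G200"

@str = private unnamed_addr addrspace(200) constant [17 x i8] c"0123456789abcdef\00"

declare ptr addrspace(200) @strcpy(ptr addrspace(200), ptr addrspace(200)) addrspace(200)

define void @strcpy_from_literal(ptr addrspace(200) align 4 %dst) addrspace(200) nounwind {
  ; Libcall simplification can turn this call into
  ;   call void @llvm.memcpy.p200.p200.i32(%dst, @str, i32 17, i1 false)
  ; and, because the 17 copied bytes come from a string literal and therefore
  ; hold no tags, the backend may expand that memcpy inline as ordinary word
  ; stores; this is what the CHECK-ASM lines of these tests verify.
  %copy = call ptr addrspace(200) @strcpy(ptr addrspace(200) %dst, ptr addrspace(200) @str)
  ret void
}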
@@ -34,7 +34,7 @@ define void @test_strcpy_to_memcpy(ptr addrspace(200) align 4 %dst) addrspace(20 ; CHECK-ASM-NEXT: addi a1, a1, -1947 ; CHECK-ASM-NEXT: csw a1, 0(ca0) ; CHECK-ASM-NEXT: cret -; CHECK-IR-LABEL: define {{[^@]+}}@test_strcpy_to_memcpy +; CHECK-IR-LABEL: define void @test_strcpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 4 [[DST:%.*]]) addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: call void @llvm.memcpy.p200.p200.i32(ptr addrspace(200) noundef nonnull align 4 dereferenceable(17) [[DST]], ptr addrspace(200) noundef nonnull align 4 dereferenceable(17) @str, i32 17, i1 false) #[[ATTR4:[0-9]+]] @@ -62,7 +62,7 @@ define void @test_stpcpy_to_memcpy(ptr addrspace(200) align 4 %dst) addrspace(20 ; CHECK-ASM-NEXT: addi a1, a1, -1947 ; CHECK-ASM-NEXT: csw a1, 0(ca0) ; CHECK-ASM-NEXT: cret -; CHECK-IR-LABEL: define {{[^@]+}}@test_stpcpy_to_memcpy +; CHECK-IR-LABEL: define void @test_stpcpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 4 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: call void @llvm.memcpy.p200.p200.i32(ptr addrspace(200) noundef nonnull align 4 dereferenceable(17) [[DST]], ptr addrspace(200) noundef nonnull align 4 dereferenceable(17) @str, i32 17, i1 false) #[[ATTR5:[0-9]+]] @@ -92,7 +92,7 @@ define void @test_strcat_to_memcpy(ptr addrspace(200) align 4 %dst) addrspace(20 ; CHECK-ASM-NEXT: clc cs0, 0(csp) # 8-byte Folded Reload ; CHECK-ASM-NEXT: cincoffset csp, csp, 16 ; CHECK-ASM-NEXT: cret -; CHECK-IR-LABEL: define {{[^@]+}}@test_strcat_to_memcpy +; CHECK-IR-LABEL: define void @test_strcat_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 4 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: [[STRLEN:%.*]] = call i32 @strlen(ptr addrspace(200) noundef nonnull dereferenceable(1) [[DST]]) @@ -121,7 +121,7 @@ define void @test_strncpy_to_memcpy(ptr addrspace(200) align 4 %dst) addrspace(2 ; CHECK-ASM-NEXT: clc cra, 8(csp) # 8-byte Folded Reload ; CHECK-ASM-NEXT: cincoffset csp, csp, 16 ; CHECK-ASM-NEXT: cret -; CHECK-IR-LABEL: define {{[^@]+}}@test_strncpy_to_memcpy +; CHECK-IR-LABEL: define void @test_strncpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 4 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: [[CALL:%.*]] = call ptr addrspace(200) @strncpy(ptr addrspace(200) [[DST]], ptr addrspace(200) nonnull @str, i64 17) #[[ATTR1]] @@ -148,7 +148,7 @@ define void @test_stpncpy_to_memcpy(ptr addrspace(200) align 4 %dst) addrspace(2 ; CHECK-ASM-NEXT: clc cra, 8(csp) # 8-byte Folded Reload ; CHECK-ASM-NEXT: cincoffset csp, csp, 16 ; CHECK-ASM-NEXT: cret -; CHECK-IR-LABEL: define {{[^@]+}}@test_stpncpy_to_memcpy +; CHECK-IR-LABEL: define void @test_stpncpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 4 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: [[CALL:%.*]] = call ptr addrspace(200) @stpncpy(ptr addrspace(200) [[DST]], ptr addrspace(200) nonnull @str, i64 17) #[[ATTR1]] diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/subobject-bounds-redundant-setbounds.c.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/subobject-bounds-redundant-setbounds.c.ll index 482d9d94fd53e..1a1076df70a2f 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/subobject-bounds-redundant-setbounds.c.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/subobject-bounds-redundant-setbounds.c.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: 
--function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/subobject-bounds-redundant-setbounds.c.ll ; REQUIRES: asserts ; RUN: rm -f %t.dbg-opt %t.dbg-llc @@ -40,7 +40,7 @@ define void @use_inline(ptr addrspace(200) nocapture %arg) local_unnamed_addr ad ; ASM-NEXT: li a1, 2 ; ASM-NEXT: csw a1, 0(ca0) ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@use_inline +; CHECK-LABEL: define void @use_inline ; CHECK-SAME: (ptr addrspace(200) nocapture [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: store i32 2, ptr addrspace(200) [[ARG]], align 4 ; CHECK-NEXT: ret void @@ -64,7 +64,7 @@ define signext i32 @stack_array() local_unnamed_addr addrspace(200) nounwind { ; ASM-NEXT: clc cs0, 48(csp) # 8-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 64 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@stack_array +; CHECK-LABEL: define signext i32 @stack_array ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: [[ARRAY:%.*]] = alloca [10 x i32], align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.lifetime.start.p200(i64 40, ptr addrspace(200) nonnull [[ARRAY]]) @@ -115,7 +115,7 @@ define signext i32 @stack_int() local_unnamed_addr addrspace(200) nounwind { ; ASM-NEXT: clc cra, 8(csp) # 8-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 16 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@stack_int +; CHECK-LABEL: define signext i32 @stack_int ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.lifetime.start.p200(i64 4, ptr addrspace(200) nonnull [[VALUE]]) @@ -163,7 +163,7 @@ define signext i32 @stack_int_inlined() local_unnamed_addr addrspace(200) nounwi ; ASM-NEXT: clw a0, 12(csp) ; ASM-NEXT: cincoffset csp, csp, 16 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@stack_int_inlined +; CHECK-LABEL: define signext i32 @stack_int_inlined ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.lifetime.start.p200(i64 4, ptr addrspace(200) nonnull [[VALUE]]) @@ -210,7 +210,7 @@ define signext i32 @out_of_bounds_setbounds() local_unnamed_addr addrspace(200) ; ASM-NEXT: clw a0, 12(csp) ; ASM-NEXT: cincoffset csp, csp, 16 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@out_of_bounds_setbounds +; CHECK-LABEL: define signext i32 @out_of_bounds_setbounds ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: [[TMP1:%.*]] = call ptr addrspace(200) @llvm.cheri.bounded.stack.cap.i32(ptr addrspace(200) [[VALUE]], i32 4) @@ -251,7 +251,7 @@ define signext i32 @setbounds_escapes() local_unnamed_addr addrspace(200) nounwi ; ASM-NEXT: clc cra, 8(csp) # 8-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 16 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@setbounds_escapes +; CHECK-LABEL: define signext i32 @setbounds_escapes ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: [[ADDRESS_WITH_BOUNDS:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i32(ptr addrspace(200) nonnull [[VALUE]], i32 4) @@ -287,7 +287,7 @@ define void @assume_aligned() local_unnamed_addr 
addrspace(200) nounwind { ; ASM-NEXT: csw a0, 12(csp) ; ASM-NEXT: cincoffset csp, csp, 16 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@assume_aligned +; CHECK-LABEL: define void @assume_aligned ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = alloca [4 x i8], align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr addrspace(200) [[TMP1]], i64 4) ] diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/trunc-load.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/trunc-load.ll index 84f22bdee629f..c07083f39997a 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/trunc-load.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/trunc-load.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/trunc-load.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f %s -o - < %s | FileCheck %s --check-prefix=PURECAP ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -o - < %s | FileCheck %s --check-prefix=HYBRID diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/unaligned-loads-stores-hybrid.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/unaligned-loads-stores-hybrid.ll index 14920ff9d804a..4589ffe5a6707 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/unaligned-loads-stores-hybrid.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/unaligned-loads-stores-hybrid.ll @@ -13,7 +13,14 @@ define i64 @load_global_i64_align_1(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi0: ; CHECK-NEXT: auipc a0, %got_pcrel_hi(a1) ; CHECK-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi0)(a0) -; CHECK-NEXT: cfromptr ca0, ddc, a0 +; CHECK-NEXT: cspecialr ca1, ddc +; CHECK-NEXT: bnez a0, .LBB0_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca0, cnull +; CHECK-NEXT: j .LBB0_3 +; CHECK-NEXT: .LBB0_2: +; CHECK-NEXT: csetaddr ca0, ca1, a0 +; CHECK-NEXT: .LBB0_3: ; CHECK-NEXT: lbu.cap a1, (ca0) ; CHECK-NEXT: cincoffset ca2, ca0, 1 ; CHECK-NEXT: cincoffset ca3, ca0, 2 @@ -52,7 +59,14 @@ define i64 @load_global_i64_align_2(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi1: ; CHECK-NEXT: auipc a0, %got_pcrel_hi(a2) ; CHECK-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi1)(a0) -; CHECK-NEXT: cfromptr ca0, ddc, a0 +; CHECK-NEXT: cspecialr ca1, ddc +; CHECK-NEXT: bnez a0, .LBB1_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca0, cnull +; CHECK-NEXT: j .LBB1_3 +; CHECK-NEXT: .LBB1_2: +; CHECK-NEXT: csetaddr ca0, ca1, a0 +; CHECK-NEXT: .LBB1_3: ; CHECK-NEXT: lhu.cap a1, (ca0) ; CHECK-NEXT: cincoffset ca2, ca0, 2 ; CHECK-NEXT: cincoffset ca3, ca0, 4 @@ -75,7 +89,14 @@ define i64 @load_global_i64_align_4(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi2: ; CHECK-NEXT: auipc a0, %got_pcrel_hi(a4) ; CHECK-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi2)(a0) -; CHECK-NEXT: cfromptr ca1, ddc, a0 +; CHECK-NEXT: cspecialr ca1, ddc +; CHECK-NEXT: bnez a0, .LBB2_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca1, cnull +; CHECK-NEXT: j .LBB2_3 +; CHECK-NEXT: .LBB2_2: +; CHECK-NEXT: csetaddr ca1, ca1, a0 +; CHECK-NEXT: .LBB2_3: ; CHECK-NEXT: lw.cap a0, (ca1) ; CHECK-NEXT: cincoffset ca1, ca1, 4 ; CHECK-NEXT: lw.cap a1, (ca1) @@ -90,7 +111,14 @@ define i64 @load_global_i64_align_8(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: 
.Lpcrel_hi3: ; CHECK-NEXT: auipc a0, %got_pcrel_hi(a8) ; CHECK-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi3)(a0) -; CHECK-NEXT: cfromptr ca1, ddc, a0 +; CHECK-NEXT: cspecialr ca1, ddc +; CHECK-NEXT: bnez a0, .LBB3_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca1, cnull +; CHECK-NEXT: j .LBB3_3 +; CHECK-NEXT: .LBB3_2: +; CHECK-NEXT: csetaddr ca1, ca1, a0 +; CHECK-NEXT: .LBB3_3: ; CHECK-NEXT: lw.cap a0, (ca1) ; CHECK-NEXT: cincoffset ca1, ca1, 4 ; CHECK-NEXT: lw.cap a1, (ca1) @@ -105,28 +133,35 @@ define void @store_global_i64_align_1(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi4: ; CHECK-NEXT: auipc a2, %got_pcrel_hi(a1) ; CHECK-NEXT: lw a2, %pcrel_lo(.Lpcrel_hi4)(a2) -; CHECK-NEXT: srli a3, a1, 24 -; CHECK-NEXT: cfromptr ca2, ddc, a2 -; CHECK-NEXT: cincoffset ca4, ca2, 7 -; CHECK-NEXT: sb.cap a3, (ca4) -; CHECK-NEXT: srli a3, a1, 16 -; CHECK-NEXT: cincoffset ca4, ca2, 6 -; CHECK-NEXT: sb.cap a3, (ca4) -; CHECK-NEXT: srli a3, a1, 8 -; CHECK-NEXT: cincoffset ca4, ca2, 5 -; CHECK-NEXT: sb.cap a3, (ca4) -; CHECK-NEXT: srli a3, a0, 24 -; CHECK-NEXT: cincoffset ca4, ca2, 4 +; CHECK-NEXT: cspecialr ca3, ddc +; CHECK-NEXT: bnez a2, .LBB4_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca2, cnull +; CHECK-NEXT: j .LBB4_3 +; CHECK-NEXT: .LBB4_2: +; CHECK-NEXT: csetaddr ca2, ca3, a2 +; CHECK-NEXT: .LBB4_3: +; CHECK-NEXT: sb.cap a0, (ca2) +; CHECK-NEXT: cincoffset ca3, ca2, 7 +; CHECK-NEXT: srli a4, a1, 24 +; CHECK-NEXT: sb.cap a4, (ca3) +; CHECK-NEXT: cincoffset ca3, ca2, 6 +; CHECK-NEXT: srli a4, a1, 16 +; CHECK-NEXT: sb.cap a4, (ca3) +; CHECK-NEXT: cincoffset ca3, ca2, 5 +; CHECK-NEXT: srli a4, a1, 8 +; CHECK-NEXT: sb.cap a4, (ca3) +; CHECK-NEXT: cincoffset ca3, ca2, 4 +; CHECK-NEXT: cincoffset ca4, ca2, 3 +; CHECK-NEXT: sb.cap a1, (ca3) +; CHECK-NEXT: srli a1, a0, 24 +; CHECK-NEXT: cincoffset ca3, ca2, 2 ; CHECK-NEXT: sb.cap a1, (ca4) ; CHECK-NEXT: srli a1, a0, 16 -; CHECK-NEXT: srli a4, a0, 8 -; CHECK-NEXT: sb.cap a0, (ca2) -; CHECK-NEXT: cincoffset ca0, ca2, 3 -; CHECK-NEXT: sb.cap a3, (ca0) -; CHECK-NEXT: cincoffset ca0, ca2, 2 ; CHECK-NEXT: cincoffset ca2, ca2, 1 -; CHECK-NEXT: sb.cap a1, (ca0) -; CHECK-NEXT: sb.cap a4, (ca2) +; CHECK-NEXT: srli a0, a0, 8 +; CHECK-NEXT: sb.cap a1, (ca3) +; CHECK-NEXT: sb.cap a0, (ca2) ; CHECK-NEXT: ret store i64 %y, i64 addrspace(200)* addrspacecast(i64* @a1 to i64 addrspace(200)*), align 1 ret void @@ -138,16 +173,23 @@ define void @store_global_i64_align_2(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi5: ; CHECK-NEXT: auipc a2, %got_pcrel_hi(a2) ; CHECK-NEXT: lw a2, %pcrel_lo(.Lpcrel_hi5)(a2) -; CHECK-NEXT: srli a3, a1, 16 -; CHECK-NEXT: srli a4, a0, 16 -; CHECK-NEXT: cfromptr ca2, ddc, a2 +; CHECK-NEXT: cspecialr ca3, ddc +; CHECK-NEXT: bnez a2, .LBB5_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca2, cnull +; CHECK-NEXT: j .LBB5_3 +; CHECK-NEXT: .LBB5_2: +; CHECK-NEXT: csetaddr ca2, ca3, a2 +; CHECK-NEXT: .LBB5_3: ; CHECK-NEXT: sh.cap a0, (ca2) -; CHECK-NEXT: cincoffset ca0, ca2, 6 -; CHECK-NEXT: sh.cap a3, (ca0) -; CHECK-NEXT: cincoffset ca0, ca2, 4 +; CHECK-NEXT: cincoffset ca3, ca2, 6 +; CHECK-NEXT: srli a4, a1, 16 +; CHECK-NEXT: sh.cap a4, (ca3) +; CHECK-NEXT: cincoffset ca3, ca2, 4 ; CHECK-NEXT: cincoffset ca2, ca2, 2 -; CHECK-NEXT: sh.cap a1, (ca0) -; CHECK-NEXT: sh.cap a4, (ca2) +; CHECK-NEXT: srli a0, a0, 16 +; CHECK-NEXT: sh.cap a1, (ca3) +; CHECK-NEXT: sh.cap a0, (ca2) ; CHECK-NEXT: ret store i64 %y, i64 addrspace(200)* addrspacecast(i64* @a2 to i64 addrspace(200)*), align 2 ret void @@ -159,7 +201,14 @@ define void 
@store_global_i64_align_4(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi6: ; CHECK-NEXT: auipc a2, %got_pcrel_hi(a4) ; CHECK-NEXT: lw a2, %pcrel_lo(.Lpcrel_hi6)(a2) -; CHECK-NEXT: cfromptr ca2, ddc, a2 +; CHECK-NEXT: cspecialr ca3, ddc +; CHECK-NEXT: bnez a2, .LBB6_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca2, cnull +; CHECK-NEXT: j .LBB6_3 +; CHECK-NEXT: .LBB6_2: +; CHECK-NEXT: csetaddr ca2, ca3, a2 +; CHECK-NEXT: .LBB6_3: ; CHECK-NEXT: sw.cap a0, (ca2) ; CHECK-NEXT: cincoffset ca0, ca2, 4 ; CHECK-NEXT: sw.cap a1, (ca0) @@ -174,7 +223,14 @@ define void @store_global_i64_align_8(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi7: ; CHECK-NEXT: auipc a2, %got_pcrel_hi(a8) ; CHECK-NEXT: lw a2, %pcrel_lo(.Lpcrel_hi7)(a2) -; CHECK-NEXT: cfromptr ca2, ddc, a2 +; CHECK-NEXT: cspecialr ca3, ddc +; CHECK-NEXT: bnez a2, .LBB7_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca2, cnull +; CHECK-NEXT: j .LBB7_3 +; CHECK-NEXT: .LBB7_2: +; CHECK-NEXT: csetaddr ca2, ca3, a2 +; CHECK-NEXT: .LBB7_3: ; CHECK-NEXT: sw.cap a0, (ca2) ; CHECK-NEXT: cincoffset ca0, ca2, 4 ; CHECK-NEXT: sw.cap a1, (ca0) diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/unaligned-loads-stores-purecap.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/unaligned-loads-stores-purecap.ll index b34cc2c0f9529..0a95988bab271 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/unaligned-loads-stores-purecap.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/unaligned-loads-stores-purecap.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/unaligned-loads-stores-purecap.ll ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f %s -o - | FileCheck %s diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-rmw-cap-ptr-arg.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-rmw-cap-ptr-arg.ll index 7c228f5545af4..ab33f739afeb1 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-rmw-cap-ptr-arg.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-rmw-cap-ptr-arg.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/atomic-rmw-cap-ptr-arg.ll ; Check that we can generate sensible code for atomic operations using capability pointers on capabilities ; See https://github.com/CTSRD-CHERI/llvm-project/issues/470 diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/bounded-allocas-lifetimes.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/bounded-allocas-lifetimes.ll index 00c4c79831df9..9ff00fe1c1c24 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/bounded-allocas-lifetimes.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/bounded-allocas-lifetimes.ll @@ -1,5 +1,6 @@ -; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/bounded-allocas-lifetimes.ll +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; CHERI-GENERIC-UTC: mir ; 
RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d %s -o - --stop-after=finalize-isel | FileCheck %s diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cap-from-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cap-from-ptr.ll index 7fbbc5c636007..239b52dadf5ba 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cap-from-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cap-from-ptr.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cap-from-ptr.ll ;; Check that we can correctly generate code for llvm.cheri.cap.from.pointer() ;; This previously asserted on RISC-V due to a broken ISel pattern. @@ -7,17 +7,29 @@ ; RUN: opt -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -passes=instcombine -S < %s | llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d | FileCheck %s --check-prefix=PURECAP ; RUN: opt -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -passes=instcombine -S < %s | llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d | FileCheck %s --check-prefix=HYBRID -define internal i8 addrspace(200)* @test(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %cap, i64 %offset) addrspace(200) nounwind { +define internal ptr addrspace(200) @test(ptr addrspace(200) %ptr, ptr addrspace(200) %cap, i64 %offset) addrspace(200) nounwind { ; PURECAP-LABEL: test: ; PURECAP: # %bb.0: # %entry -; PURECAP-NEXT: cfromptr ca1, ca1, a2 +; PURECAP-NEXT: bnez a2, .LBB0_2 +; PURECAP-NEXT: # %bb.1: # %entry +; PURECAP-NEXT: cmove ca1, cnull +; PURECAP-NEXT: j .LBB0_3 +; PURECAP-NEXT: .LBB0_2: +; PURECAP-NEXT: csetaddr ca1, ca1, a2 +; PURECAP-NEXT: .LBB0_3: # %entry ; PURECAP-NEXT: csc ca1, 0(ca0) ; PURECAP-NEXT: cmove ca0, ca1 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: test: ; HYBRID: # %bb.0: # %entry -; HYBRID-NEXT: cfromptr ca1, ca1, a2 +; HYBRID-NEXT: bnez a2, .LBB0_2 +; HYBRID-NEXT: # %bb.1: # %entry +; HYBRID-NEXT: cmove ca1, cnull +; HYBRID-NEXT: j .LBB0_3 +; HYBRID-NEXT: .LBB0_2: +; HYBRID-NEXT: csetaddr ca1, ca1, a2 +; HYBRID-NEXT: .LBB0_3: # %entry ; HYBRID-NEXT: sc.cap ca1, (ca0) ; HYBRID-NEXT: cmove ca0, ca1 ; HYBRID-NEXT: ret @@ -29,13 +41,13 @@ define internal i8 addrspace(200)* @test(i8 addrspace(200)* addrspace(200)* %ptr ; CHECK-IR-NEXT: ret ptr addrspace(200) [[NEW]] ; entry: - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)* %cap, i64 %offset) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) %cap, i64 %offset) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; (int_cheri_cap_from_ptr x, 0) -> null -define internal i8 addrspace(200)* @cap_from_ptr_zero(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %cap) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_zero(ptr addrspace(200) %ptr, ptr addrspace(200) %cap) nounwind { ; PURECAP-LABEL: cap_from_ptr_zero: ; PURECAP: # %bb.0: # %entry ; PURECAP-NEXT: csc cnull, 0(ca0) @@ -47,30 +59,44 @@ define internal i8 
addrspace(200)* @cap_from_ptr_zero(i8 addrspace(200)* addrspa ; HYBRID-NEXT: sc.cap cnull, (ca0) ; HYBRID-NEXT: cmove ca0, cnull ; HYBRID-NEXT: ret -; CHECK-IR-LABEL: define {{[^@]+}}@cap_from_ptr_zero +; CHECK-IR-LABEL: define internal ptr addrspace(200) @cap_from_ptr_zero ; CHECK-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], ptr addrspace(200) [[CAP:%.*]]) #[[ATTR0]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: store ptr addrspace(200) null, ptr addrspace(200) [[PTR]], align 16 ; CHECK-IR-NEXT: ret ptr addrspace(200) null ; entry: - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)* %cap, i64 0) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) %cap, i64 0) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; Check that (int_cheri_cap_from_ptr ddc, x) can use the DDC register directly -define internal i8 addrspace(200)* @cap_from_ptr_ddc(i8 addrspace(200)* addrspace(200)* %ptr, i64 %offset) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_ddc(ptr addrspace(200) %ptr, i64 %offset) addrspace(200) nounwind { ; PURECAP-LABEL: cap_from_ptr_ddc: ; PURECAP: # %bb.0: # %entry -; PURECAP-NEXT: cfromptr ca1, ddc, a1 +; PURECAP-NEXT: cspecialr ca2, ddc +; PURECAP-NEXT: bnez a1, .LBB2_2 +; PURECAP-NEXT: # %bb.1: # %entry +; PURECAP-NEXT: cmove ca1, cnull +; PURECAP-NEXT: j .LBB2_3 +; PURECAP-NEXT: .LBB2_2: +; PURECAP-NEXT: csetaddr ca1, ca2, a1 +; PURECAP-NEXT: .LBB2_3: # %entry ; PURECAP-NEXT: csc ca1, 0(ca0) ; PURECAP-NEXT: cmove ca0, ca1 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: cap_from_ptr_ddc: ; HYBRID: # %bb.0: # %entry -; HYBRID-NEXT: cfromptr ca1, ddc, a1 +; HYBRID-NEXT: cspecialr ca2, ddc +; HYBRID-NEXT: bnez a1, .LBB2_2 +; HYBRID-NEXT: # %bb.1: # %entry +; HYBRID-NEXT: cmove ca1, cnull +; HYBRID-NEXT: j .LBB2_3 +; HYBRID-NEXT: .LBB2_2: +; HYBRID-NEXT: csetaddr ca1, ca2, a1 +; HYBRID-NEXT: .LBB2_3: # %entry ; HYBRID-NEXT: sc.cap ca1, (ca0) ; HYBRID-NEXT: cmove ca0, ca1 ; HYBRID-NEXT: ret @@ -83,14 +109,14 @@ define internal i8 addrspace(200)* @cap_from_ptr_ddc(i8 addrspace(200)* addrspac ; CHECK-IR-NEXT: ret ptr addrspace(200) [[NEW]] ; entry: - %ddc = call i8 addrspace(200)* @llvm.cheri.ddc.get() - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)* %ddc, i64 %offset) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %ddc = call ptr addrspace(200) @llvm.cheri.ddc.get() + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) %ddc, i64 %offset) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; Check that (int_cheri_cap_from_ptr x, 0) -> null has priority over direct DDC usage -define internal i8 addrspace(200)* @cap_from_ptr_ddc_zero(i8 addrspace(200)* addrspace(200)* %ptr) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_ddc_zero(ptr addrspace(200) %ptr) addrspace(200) nounwind { ; PURECAP-LABEL: cap_from_ptr_ddc_zero: ; PURECAP: # %bb.0: # %entry ; PURECAP-NEXT: csc cnull, 0(ca0) @@ -109,26 +135,36 @@ define internal i8 addrspace(200)* @cap_from_ptr_ddc_zero(i8 addrspace(200)* add ; CHECK-IR-NEXT: ret ptr addrspace(200) null ; entry: - %ddc = call i8 addrspace(200)* @llvm.cheri.ddc.get() - %new = call i8 addrspace(200)* 
@llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)* %ddc, i64 0) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %ddc = call ptr addrspace(200) @llvm.cheri.ddc.get() + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) %ddc, i64 0) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } ;; Check that (int_cheri_cap_from_ptr null, x) does not use register zero (since that is DDC) -define internal i8 addrspace(200)* @cap_from_ptr_null(i8 addrspace(200)* addrspace(200)* %ptr, i64 %offset) addrspace(200) nounwind { +define internal ptr addrspace(200) @cap_from_ptr_null(ptr addrspace(200) %ptr, i64 %offset) addrspace(200) nounwind { ; PURECAP-LABEL: cap_from_ptr_null: ; PURECAP: # %bb.0: # %entry -; PURECAP-NEXT: cmove ca2, cnull -; PURECAP-NEXT: cfromptr ca1, ca2, a1 +; PURECAP-NEXT: bnez a1, .LBB4_2 +; PURECAP-NEXT: # %bb.1: # %entry +; PURECAP-NEXT: cmove ca1, cnull +; PURECAP-NEXT: j .LBB4_3 +; PURECAP-NEXT: .LBB4_2: +; PURECAP-NEXT: csetaddr ca1, cnull, a1 +; PURECAP-NEXT: .LBB4_3: # %entry ; PURECAP-NEXT: csc ca1, 0(ca0) ; PURECAP-NEXT: cmove ca0, ca1 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: cap_from_ptr_null: ; HYBRID: # %bb.0: # %entry -; HYBRID-NEXT: cmove ca2, cnull -; HYBRID-NEXT: cfromptr ca1, ca2, a1 +; HYBRID-NEXT: bnez a1, .LBB4_2 +; HYBRID-NEXT: # %bb.1: # %entry +; HYBRID-NEXT: cmove ca1, cnull +; HYBRID-NEXT: j .LBB4_3 +; HYBRID-NEXT: .LBB4_2: +; HYBRID-NEXT: csetaddr ca1, cnull, a1 +; HYBRID-NEXT: .LBB4_3: # %entry ; HYBRID-NEXT: sc.cap ca1, (ca0) ; HYBRID-NEXT: cmove ca0, ca1 ; HYBRID-NEXT: ret @@ -140,10 +176,10 @@ define internal i8 addrspace(200)* @cap_from_ptr_null(i8 addrspace(200)* addrspa ; CHECK-IR-NEXT: ret ptr addrspace(200) [[NEW]] ; entry: - %new = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)* null, i64 %offset) - store i8 addrspace(200)* %new, i8 addrspace(200)* addrspace(200)* %ptr, align 16 - ret i8 addrspace(200)* %new + %new = call ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200) null, i64 %offset) + store ptr addrspace(200) %new, ptr addrspace(200) %ptr, align 16 + ret ptr addrspace(200) %new } -declare i8 addrspace(200)* @llvm.cheri.cap.from.pointer.i64(i8 addrspace(200)*, i64) -declare i8 addrspace(200)* @llvm.cheri.ddc.get() +declare ptr addrspace(200) @llvm.cheri.cap.from.pointer.i64(ptr addrspace(200), i64) +declare ptr addrspace(200) @llvm.cheri.ddc.get() diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-csub.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-csub.ll index c829bf6b91ee8..5cb3b4342b037 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-csub.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-csub.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-csub.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d %s -o - | FileCheck %s --check-prefix=HYBRID ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d %s -o - | FileCheck %s --check-prefix=PURECAP diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-intrinsics-folding-broken-module-regression.ll 
b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-intrinsics-folding-broken-module-regression.ll index c5a7591eaa9fb..aab44e96f1eb7 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-intrinsics-folding-broken-module-regression.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-intrinsics-folding-broken-module-regression.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-intrinsics-folding-broken-module-regression.ll ; This used to create a broken function. ; FIXME: the getoffset+add sequence should be folded to an increment diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-memfn-call.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-memfn-call.ll index 9c6a884b53d43..3107b0cde4969 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-memfn-call.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-memfn-call.ll @@ -1,13 +1,13 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-memfn-call.ll ; Check that we call memset_c/memmove_c/memcpy_c in hybrid mode. ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d %s -o - | FileCheck %s --check-prefix=PURECAP ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d %s -o - | FileCheck %s --check-prefix=HYBRID %struct.x = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } -declare void @llvm.memmove.p200i8.p200i8.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) -declare void @llvm.memset.p200i8.i64(ptr addrspace(200) nocapture, i8, i64, i1) -declare void @llvm.memcpy.p200i8.p200i8.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) +declare void @llvm.memmove.p200.p200.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) +declare void @llvm.memset.p200.i64(ptr addrspace(200) nocapture, i8, i64, i1) +declare void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) nocapture, ptr addrspace(200) nocapture readonly, i64, i1) define void @call_memset(ptr addrspace(200) align 4 %dst) nounwind { ; PURECAP-LABEL: call_memset: @@ -32,7 +32,7 @@ define void @call_memset(ptr addrspace(200) align 4 %dst) nounwind { ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret entry: - call void @llvm.memset.p200i8.i64(ptr addrspace(200) align 4 %dst, i8 0, i64 40, i1 false) + call void @llvm.memset.p200.i64(ptr addrspace(200) align 4 %dst, i8 0, i64 40, i1 false) ret void } @@ -57,7 +57,7 @@ define void @call_memcpy(ptr addrspace(200) align 4 %dst, ptr addrspace(200) ali ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret entry: - call void @llvm.memcpy.p200i8.p200i8.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) + call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) ret void } @@ -82,7 +82,7 @@ define void @call_memmove(ptr addrspace(200) align 4 %dst, ptr addrspace(200) al ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret entry: - call void 
@llvm.memmove.p200i8.p200i8.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) + call void @llvm.memmove.p200.p200.i64(ptr addrspace(200) align 4 %dst, ptr addrspace(200) align 4 %src, i64 40, i1 false) ret void } diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-pointer-comparison.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-pointer-comparison.ll index 937081935f98b..b6fa5582958d6 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-pointer-comparison.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cheri-pointer-comparison.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cheri-pointer-comparison.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d %s -o - | FileCheck %s --check-prefix=HYBRID ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d %s -o - | FileCheck %s --check-prefix=PURECAP diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/dagcombine-ptradd-deleted-regression.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/dagcombine-ptradd-deleted-regression.ll index 1ad12b3fb84ed..ce370954abcd9 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/dagcombine-ptradd-deleted-regression.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/dagcombine-ptradd-deleted-regression.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/dagcombine-ptradd-deleted-regression.ll ; This would previously crash DAGCombiner::visitPTRADD since the PTRADD ; corresponding to the second GEP would be collapsed to a no-op when diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/function-alias-size.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/function-alias-size.ll index 27eefbddb9e50..0ff895acb80d5 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/function-alias-size.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/function-alias-size.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/function-alias-size.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d %s -o - < %s | FileCheck %s --check-prefix=ASM ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d %s -o - -filetype=obj < %s | llvm-objdump --syms -r - | FileCheck %s --check-prefix=OBJDUMP diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/gvn-capability-store-to-load-fwd.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/gvn-capability-store-to-load-fwd.ll index 96849648f5e58..aa26defc127a4 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/gvn-capability-store-to-load-fwd.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/gvn-capability-store-to-load-fwd.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been 
autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/gvn-capability-store-to-load-fwd.ll ; Check that GVN does not attempt to read capability fields that it can't get the bits for ; This is https://github.com/CTSRD-CHERI/llvm-project/issues/385 @@ -18,11 +18,10 @@ target datalayout = "e-m:e-pf200:128:128:128:64-p:64:64-i64:64-i128:128-n64-S128-A200-P200-G200" -%0 = type { i8, i8, [14 x i8] } -%struct.addrinfo = type { i32, i32, i32, i32, i32, i8 addrspace(200)*, %0 addrspace(200)*, %struct.addrinfo addrspace(200)* } +%struct.addrinfo = type { i32, i32, i32, i32, i32, ptr addrspace(200), ptr addrspace(200), ptr addrspace(200) } -define i32 @first_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @first_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: first_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset csp, csp, -80 @@ -30,7 +29,7 @@ define i32 @first_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; ASM-NEXT: clw a0, 0(csp) ; ASM-NEXT: cincoffset csp, csp, 80 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@first_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @first_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 16, addrspace(200) ; CHECK-NEXT: store ptr addrspace(200) [[ARG]], ptr addrspace(200) [[STACKVAL]], align 16 @@ -38,14 +37,13 @@ define i32 @first_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; CHECK-NEXT: ret i32 [[RESULT]] ; %stackval = alloca %struct.addrinfo, align 16, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 0 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 16 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 0 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 16 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } -define i32 @second_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @second_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: second_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset csp, csp, -80 @@ -53,7 +51,7 @@ define i32 @second_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; ASM-NEXT: clw a0, 4(csp) ; ASM-NEXT: cincoffset csp, csp, 80 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@second_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @second_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 16, addrspace(200) ; CHECK-NEXT: [[FIELD:%.*]] = getelementptr inbounds [[STRUCT_ADDRINFO]], ptr addrspace(200) [[STACKVAL]], i64 0, i32 1 @@ -62,14 +60,13 @@ define i32 @second_i32_store_to_load_fwd(i8 
addrspace(200)* %arg) local_unnamed_ ; CHECK-NEXT: ret i32 [[RESULT]] ; %stackval = alloca %struct.addrinfo, align 16, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 1 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 16 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 1 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 16 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } -define i32 @third_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @third_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: third_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset csp, csp, -80 @@ -77,7 +74,7 @@ define i32 @third_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; ASM-NEXT: clw a0, 8(csp) ; ASM-NEXT: cincoffset csp, csp, 80 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@third_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @third_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 16, addrspace(200) ; CHECK-NEXT: [[FIELD:%.*]] = getelementptr inbounds [[STRUCT_ADDRINFO]], ptr addrspace(200) [[STACKVAL]], i64 0, i32 2 @@ -86,14 +83,13 @@ define i32 @third_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_a ; CHECK-NEXT: ret i32 [[RESULT]] ; %stackval = alloca %struct.addrinfo, align 16, addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 2 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 16 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 2 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 16 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } -define i32 @fourth_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_addr addrspace(200) nounwind { +define i32 @fourth_i32_store_to_load_fwd(ptr addrspace(200) %arg) local_unnamed_addr addrspace(200) nounwind { ; ASM-LABEL: fourth_i32_store_to_load_fwd: ; ASM: # %bb.0: ; ASM-NEXT: cincoffset csp, csp, -80 @@ -101,7 +97,7 @@ define i32 @fourth_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; ASM-NEXT: clw a0, 12(csp) ; ASM-NEXT: cincoffset csp, csp, 80 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@fourth_i32_store_to_load_fwd +; CHECK-LABEL: define i32 @fourth_i32_store_to_load_fwd ; CHECK-SAME: (ptr addrspace(200) [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0]] { ; CHECK-NEXT: [[STACKVAL:%.*]] = alloca [[STRUCT_ADDRINFO:%.*]], align 16, addrspace(200) ; CHECK-NEXT: [[FIELD:%.*]] = getelementptr inbounds [[STRUCT_ADDRINFO]], ptr addrspace(200) [[STACKVAL]], i64 0, i32 3 @@ -110,9 +106,8 @@ define i32 @fourth_i32_store_to_load_fwd(i8 addrspace(200)* %arg) local_unnamed_ ; CHECK-NEXT: ret i32 [[RESULT]] ; %stackval = alloca %struct.addrinfo, align 16, 
addrspace(200) - %field = getelementptr inbounds %struct.addrinfo, %struct.addrinfo addrspace(200)* %stackval, i64 0, i32 3 - %as_cap = bitcast %struct.addrinfo addrspace(200)* %stackval to i8 addrspace(200)* addrspace(200)* - store i8 addrspace(200)* %arg, i8 addrspace(200)* addrspace(200)* %as_cap, align 16 - %result = load i32, i32 addrspace(200)* %field, align 4 + %field = getelementptr inbounds %struct.addrinfo, ptr addrspace(200) %stackval, i64 0, i32 3 + store ptr addrspace(200) %arg, ptr addrspace(200) %stackval, align 16 + %result = load i32, ptr addrspace(200) %field, align 4 ret i32 %result } diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/hoist-alloca.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/hoist-alloca.ll index 8e806df8d5c43..4c7a9fe17d987 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/hoist-alloca.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/hoist-alloca.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/hoist-alloca.ll ; REQUIRES: asserts ; Check that we can hoist the csetbounds for a local alloca outside of loops diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/intrinsics-purecap-only.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/intrinsics-purecap-only.ll index f254d6f9f8440..cd1d6130a5727 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/intrinsics-purecap-only.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/intrinsics-purecap-only.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/intrinsics-purecap-only.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d < %s -o - | FileCheck %s --check-prefix=PURECAP ; RUN: not --crash llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d < %s -o - 2>&1 | FileCheck %s --check-prefix HYBRID-ERROR diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/intrinsics.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/intrinsics.ll index 504aed6d52a6e..5c8971671ea33 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/intrinsics.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/intrinsics.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/intrinsics.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d %s -o - < %s | FileCheck %s --check-prefix=PURECAP ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -o - < %s | FileCheck %s --check-prefix=HYBRID @@ -415,12 +415,16 @@ declare i8 addrspace(200)* @llvm.cheri.pcc.get() define i64 @to_pointer(i8 addrspace(200)* %cap1, i8 addrspace(200)* %cap2) nounwind { ; PURECAP-LABEL: to_pointer: ; PURECAP: # %bb.0: -; PURECAP-NEXT: ctoptr a0, ca0, ca1 +; PURECAP-NEXT: cgettag a0, ca1 +; PURECAP-NEXT: neg 
a0, a0 +; PURECAP-NEXT: and a0, a1, a0 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: to_pointer: ; HYBRID: # %bb.0: -; HYBRID-NEXT: ctoptr a0, ca0, ca1 +; HYBRID-NEXT: cgettag a0, ca1 +; HYBRID-NEXT: neg a0, a0 +; HYBRID-NEXT: and a0, a1, a0 ; HYBRID-NEXT: ret %ptr = call i64 @llvm.cheri.cap.to.pointer(i8 addrspace(200)* %cap1, i8 addrspace(200)* %cap2) ret i64 %ptr @@ -429,12 +433,16 @@ define i64 @to_pointer(i8 addrspace(200)* %cap1, i8 addrspace(200)* %cap2) nounw define i64 @to_pointer_ddc_relative(i8 addrspace(200)* %cap) nounwind { ; PURECAP-LABEL: to_pointer_ddc_relative: ; PURECAP: # %bb.0: -; PURECAP-NEXT: ctoptr a0, ca0, ddc +; PURECAP-NEXT: cgettag a1, ca0 +; PURECAP-NEXT: neg a1, a1 +; PURECAP-NEXT: and a0, a0, a1 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: to_pointer_ddc_relative: ; HYBRID: # %bb.0: -; HYBRID-NEXT: ctoptr a0, ca0, ddc +; HYBRID-NEXT: cgettag a1, ca0 +; HYBRID-NEXT: neg a1, a1 +; HYBRID-NEXT: and a0, a0, a1 ; HYBRID-NEXT: ret %ddc = call i8 addrspace(200)* @llvm.cheri.ddc.get() %ptr = call i64 @llvm.cheri.cap.to.pointer(i8 addrspace(200)* %ddc, i8 addrspace(200)* %cap) @@ -444,12 +452,22 @@ define i64 @to_pointer_ddc_relative(i8 addrspace(200)* %cap) nounwind { define i8 addrspace(200)* @from_pointer(i8 addrspace(200)* %cap, i64 %ptr) nounwind { ; PURECAP-LABEL: from_pointer: ; PURECAP: # %bb.0: -; PURECAP-NEXT: cfromptr ca0, ca0, a1 +; PURECAP-NEXT: bnez a1, .LBB28_2 +; PURECAP-NEXT: # %bb.1: +; PURECAP-NEXT: cmove ca0, cnull +; PURECAP-NEXT: cret +; PURECAP-NEXT: .LBB28_2: +; PURECAP-NEXT: csetaddr ca0, ca0, a1 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: from_pointer: ; HYBRID: # %bb.0: -; HYBRID-NEXT: cfromptr ca0, ca0, a1 +; HYBRID-NEXT: bnez a1, .LBB28_2 +; HYBRID-NEXT: # %bb.1: +; HYBRID-NEXT: cmove ca0, cnull +; HYBRID-NEXT: ret +; HYBRID-NEXT: .LBB28_2: +; HYBRID-NEXT: csetaddr ca0, ca0, a1 ; HYBRID-NEXT: ret %newcap = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer(i8 addrspace(200)* %cap, i64 %ptr) ret i8 addrspace(200)* %newcap @@ -458,12 +476,24 @@ define i8 addrspace(200)* @from_pointer(i8 addrspace(200)* %cap, i64 %ptr) nounw define i8 addrspace(200)* @from_ddc(i64 %ptr) nounwind { ; PURECAP-LABEL: from_ddc: ; PURECAP: # %bb.0: -; PURECAP-NEXT: cfromptr ca0, ddc, a0 +; PURECAP-NEXT: cspecialr ca1, ddc +; PURECAP-NEXT: bnez a0, .LBB29_2 +; PURECAP-NEXT: # %bb.1: +; PURECAP-NEXT: cmove ca0, cnull +; PURECAP-NEXT: cret +; PURECAP-NEXT: .LBB29_2: +; PURECAP-NEXT: csetaddr ca0, ca1, a0 ; PURECAP-NEXT: cret ; ; HYBRID-LABEL: from_ddc: ; HYBRID: # %bb.0: -; HYBRID-NEXT: cfromptr ca0, ddc, a0 +; HYBRID-NEXT: cspecialr ca1, ddc +; HYBRID-NEXT: bnez a0, .LBB29_2 +; HYBRID-NEXT: # %bb.1: +; HYBRID-NEXT: cmove ca0, cnull +; HYBRID-NEXT: ret +; HYBRID-NEXT: .LBB29_2: +; HYBRID-NEXT: csetaddr ca0, ca1, a0 ; HYBRID-NEXT: ret %ddc = call i8 addrspace(200)* @llvm.cheri.ddc.get() %cap = call i8 addrspace(200)* @llvm.cheri.cap.from.pointer(i8 addrspace(200)* %ddc, i64 %ptr) diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/machinelicm-hoist-csetbounds.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/machinelicm-hoist-csetbounds.ll index cf5ae2eb36498..f3150ff894b2f 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/machinelicm-hoist-csetbounds.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/machinelicm-hoist-csetbounds.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This 
file was generated from test/CodeGen/CHERI-Generic/Inputs/machinelicm-hoist-csetbounds.ll ; Previously LLVM would hoist CSetBounds instructions out of if conditions/loops ; even if the source pointer could be NULL. On MIPS and RISC-V this results in a @@ -32,37 +32,41 @@ declare ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200), i6 define dso_local void @hoist_csetbounds(i32 signext %cond, ptr addrspace(200) %f) local_unnamed_addr addrspace(200) nounwind { ; CHECK-LABEL: hoist_csetbounds: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: cincoffset csp, csp, -80 -; CHECK-NEXT: csc cra, 64(csp) # 16-byte Folded Spill -; CHECK-NEXT: csc cs0, 48(csp) # 16-byte Folded Spill -; CHECK-NEXT: csc cs1, 32(csp) # 16-byte Folded Spill -; CHECK-NEXT: csc cs2, 16(csp) # 16-byte Folded Spill -; CHECK-NEXT: csc cs3, 0(csp) # 16-byte Folded Spill +; CHECK-NEXT: cincoffset csp, csp, -96 +; CHECK-NEXT: csc cra, 80(csp) # 16-byte Folded Spill +; CHECK-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill +; CHECK-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill +; CHECK-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill +; CHECK-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill +; CHECK-NEXT: csc cs4, 0(csp) # 16-byte Folded Spill ; CHECK-NEXT: cmove cs0, ca1 -; CHECK-NEXT: cincoffset cs1, ca1, 4 -; CHECK-NEXT: li s2, -1 -; CHECK-NEXT: li s3, 99 +; CHECK-NEXT: cincoffset ca0, ca1, 4 +; CHECK-NEXT: li s3, -1 +; CHECK-NEXT: li s4, 99 +; CHECK-NEXT: csetbounds cs2, ca1, 4 +; CHECK-NEXT: csetbounds cs1, ca0, 4 ; CHECK-NEXT: j .LBB0_2 ; CHECK-NEXT: .LBB0_1: # %for.inc ; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: addiw s2, s2, 1 -; CHECK-NEXT: bgeu s2, s3, .LBB0_4 +; CHECK-NEXT: addiw s3, s3, 1 +; CHECK-NEXT: bgeu s3, s4, .LBB0_4 ; CHECK-NEXT: .LBB0_2: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: beqz s0, .LBB0_1 ; CHECK-NEXT: # %bb.3: # %if.then ; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: csetbounds ca0, cs0, 4 -; CHECK-NEXT: csetbounds ca1, cs1, 4 +; CHECK-NEXT: cmove ca0, cs2 +; CHECK-NEXT: cmove ca1, cs1 ; CHECK-NEXT: ccall call ; CHECK-NEXT: j .LBB0_1 ; CHECK-NEXT: .LBB0_4: # %for.cond.cleanup -; CHECK-NEXT: clc cra, 64(csp) # 16-byte Folded Reload -; CHECK-NEXT: clc cs0, 48(csp) # 16-byte Folded Reload -; CHECK-NEXT: clc cs1, 32(csp) # 16-byte Folded Reload -; CHECK-NEXT: clc cs2, 16(csp) # 16-byte Folded Reload -; CHECK-NEXT: clc cs3, 0(csp) # 16-byte Folded Reload -; CHECK-NEXT: cincoffset csp, csp, 80 +; CHECK-NEXT: clc cra, 80(csp) # 16-byte Folded Reload +; CHECK-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload +; CHECK-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload +; CHECK-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload +; CHECK-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload +; CHECK-NEXT: clc cs4, 0(csp) # 16-byte Folded Reload +; CHECK-NEXT: cincoffset csp, csp, 96 ; CHECK-NEXT: cret ; HOIST-OPT-LABEL: define dso_local void @hoist_csetbounds ; HOIST-OPT-SAME: (i32 signext [[COND:%.*]], ptr addrspace(200) [[F:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0:[0-9]+]] { @@ -71,22 +75,20 @@ define dso_local void @hoist_csetbounds(i32 signext %cond, ptr addrspace(200) %f ; HOIST-OPT-NEXT: br i1 [[TOBOOL]], label [[FOR_COND_CLEANUP:%.*]], label [[ENTRY_SPLIT:%.*]] ; HOIST-OPT: entry.split: ; HOIST-OPT-NEXT: [[DST:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(200) [[F]], i64 4 -; HOIST-OPT-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull [[F]], i64 4) -; HOIST-OPT-NEXT: [[TMP1:%.*]] = tail 
call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull [[DST]], i64 4) +; HOIST-OPT-NEXT: [[ADDRESS_WITH_BOUNDS:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull [[F]], i64 4) +; HOIST-OPT-NEXT: [[ADDRESS_WITH_BOUNDS1:%.*]] = tail call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull [[DST]], i64 4) ; HOIST-OPT-NEXT: br label [[FOR_BODY:%.*]] ; HOIST-OPT: for.cond.cleanup: ; HOIST-OPT-NEXT: ret void ; HOIST-OPT: for.body: ; HOIST-OPT-NEXT: [[I_06:%.*]] = phi i32 [ 0, [[ENTRY_SPLIT]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; HOIST-OPT-NEXT: tail call void @call(ptr addrspace(200) [[TMP0]], ptr addrspace(200) [[TMP1]]) +; HOIST-OPT-NEXT: tail call void @call(ptr addrspace(200) [[ADDRESS_WITH_BOUNDS]], ptr addrspace(200) [[ADDRESS_WITH_BOUNDS1]]) ; HOIST-OPT-NEXT: [[INC]] = add nuw nsw i32 [[I_06]], 1 ; HOIST-OPT-NEXT: [[CMP:%.*]] = icmp samesign ult i32 [[I_06]], 99 ; HOIST-OPT-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]] entry: %tobool = icmp eq ptr addrspace(200) %f, null - %0 = bitcast ptr addrspace(200) %f to ptr addrspace(200) %dst = getelementptr inbounds %struct.foo, ptr addrspace(200) %f, i64 0, i32 1 - %1 = bitcast ptr addrspace(200) %dst to ptr addrspace(200) br label %for.body for.cond.cleanup: ; preds = %for.inc @@ -97,10 +99,8 @@ for.body: ; preds = %for.inc, %entry br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %2 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull %0, i64 4) - %address.with.bounds = bitcast ptr addrspace(200) %2 to ptr addrspace(200) - %3 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull %1, i64 4) - %address.with.bounds1 = bitcast ptr addrspace(200) %3 to ptr addrspace(200) + %address.with.bounds = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull %f, i64 4) + %address.with.bounds1 = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull %dst, i64 4) call void @call(ptr addrspace(200) %address.with.bounds, ptr addrspace(200) %address.with.bounds1) br label %for.inc diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-from-constant.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-from-constant.ll index 663a72de4dcc7..aa9e71c43562e 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-from-constant.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-from-constant.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-from-constant.ll ;; Copying from a zero constant can be converted to a memset (even with the tag preservation flags) ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d < %s -o - | FileCheck %s diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-no-preserve-tags-attr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-no-preserve-tags-attr.ll index 8db29ef0bc11b..65b68c6e373fc 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-no-preserve-tags-attr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-no-preserve-tags-attr.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by 
utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-no-preserve-tags-attr.ll ; Check that the no_preserve_tags annotation on memcpy/memmove intrinsics allows ; use to inline struct copies >= capability size. diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-preserve-tags-size-not-multiple.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-preserve-tags-size-not-multiple.ll index b2dcff62921f5..701c2939998fd 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-preserve-tags-size-not-multiple.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-preserve-tags-size-not-multiple.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-preserve-tags-size-not-multiple.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -o - -O0 -verify-machineinstrs %s | FileCheck %s -check-prefixes CHECK ; Check that we can inline memmove/memcpy despite having the must_preserve_cheri_tags property and the size not diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-zeroinit.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-zeroinit.ll index b9738d79524bc..2b95ba6e61dfd 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-zeroinit.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/memcpy-zeroinit.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/memcpy-zeroinit.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d < %s -o - | FileCheck %s ; Check that the copy from the zeroinitializer global is turned into a series of zero stores diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/optsize-preserve-tags-memcpy-crash.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/optsize-preserve-tags-memcpy-crash.ll index 4c1b7b6a325cb..6afd328503839 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/optsize-preserve-tags-memcpy-crash.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/optsize-preserve-tags-memcpy-crash.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/optsize-preserve-tags-memcpy-crash.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d < %s -o - | FileCheck %s ; The following code copying 31 bytes (with capability alignment) using the diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/ptrtoint.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/ptrtoint.ll index cc8a96cc1d573..26bf336a28814 100644 --- 
a/llvm/test/CodeGen/CHERI-Generic/RISCV64/ptrtoint.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/ptrtoint.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/ptrtoint.ll ;; Check that we can correctly generate code for ptrtoint and perform simple folds ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d < %s | FileCheck %s @@ -12,7 +12,9 @@ define internal i64 @ptrtoint(i8 addrspace(200)* %cap) addrspace(200) nounwind { ; ; HYBRID-LABEL: ptrtoint: ; HYBRID: # %bb.0: -; HYBRID-NEXT: ctoptr a0, ca0, ddc +; HYBRID-NEXT: cgettag a1, ca0 +; HYBRID-NEXT: neg a1, a1 +; HYBRID-NEXT: and a0, a0, a1 ; HYBRID-NEXT: ret %ret = ptrtoint i8 addrspace(200)* %cap to i64 ret i64 %ret @@ -26,7 +28,9 @@ define internal i64 @ptrtoint_plus_const(i8 addrspace(200)* %cap) addrspace(200) ; ; HYBRID-LABEL: ptrtoint_plus_const: ; HYBRID: # %bb.0: -; HYBRID-NEXT: ctoptr a0, ca0, ddc +; HYBRID-NEXT: cgettag a1, ca0 +; HYBRID-NEXT: neg a1, a1 +; HYBRID-NEXT: and a0, a0, a1 ; HYBRID-NEXT: addi a0, a0, 2 ; HYBRID-NEXT: ret %zero = ptrtoint i8 addrspace(200)* %cap to i64 @@ -42,7 +46,9 @@ define internal i64 @ptrtoint_plus_var(i8 addrspace(200)* %cap, i64 %add) addrsp ; ; HYBRID-LABEL: ptrtoint_plus_var: ; HYBRID: # %bb.0: -; HYBRID-NEXT: ctoptr a0, ca0, ddc +; HYBRID-NEXT: cgettag a2, ca0 +; HYBRID-NEXT: neg a2, a2 +; HYBRID-NEXT: and a0, a0, a2 ; HYBRID-NEXT: add a0, a0, a1 ; HYBRID-NEXT: ret %zero = ptrtoint i8 addrspace(200)* %cap to i64 @@ -58,7 +64,9 @@ define internal i64 @ptrtoint_null() addrspace(200) nounwind { ; ; HYBRID-LABEL: ptrtoint_null: ; HYBRID: # %bb.0: -; HYBRID-NEXT: li a0, 0 +; HYBRID-NEXT: cgettag a0, cnull +; HYBRID-NEXT: neg a0, a0 +; HYBRID-NEXT: and a0, zero, a0 ; HYBRID-NEXT: ret %ret = ptrtoint i8 addrspace(200)* null to i64 ret i64 %ret @@ -72,7 +80,10 @@ define internal i64 @ptrtoint_null_plus_const() addrspace(200) nounwind { ; ; HYBRID-LABEL: ptrtoint_null_plus_const: ; HYBRID: # %bb.0: -; HYBRID-NEXT: li a0, 2 +; HYBRID-NEXT: cgettag a0, cnull +; HYBRID-NEXT: neg a0, a0 +; HYBRID-NEXT: and a0, zero, a0 +; HYBRID-NEXT: addi a0, a0, 2 ; HYBRID-NEXT: ret %zero = ptrtoint i8 addrspace(200)* null to i64 %ret = add i64 %zero, 2 @@ -87,7 +98,10 @@ define internal i64 @ptrtoint_null_plus_var(i64 %add) addrspace(200) nounwind { ; ; HYBRID-LABEL: ptrtoint_null_plus_var: ; HYBRID: # %bb.0: -; HYBRID-NEXT: add a0, zero, a0 +; HYBRID-NEXT: cgettag a1, cnull +; HYBRID-NEXT: neg a1, a1 +; HYBRID-NEXT: and a1, zero, a1 +; HYBRID-NEXT: add a0, a1, a0 ; HYBRID-NEXT: ret %zero = ptrtoint i8 addrspace(200)* null to i64 %ret = add i64 %zero, %add diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/setoffset-multiple-uses.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/setoffset-multiple-uses.ll index 53e905c3635ca..45c37889cab36 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/setoffset-multiple-uses.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/setoffset-multiple-uses.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 3 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from 
test/CodeGen/CHERI-Generic/Inputs/setoffset-multiple-uses.ll ; RUN: opt -S -passes=instcombine -o - %s | FileCheck %s ; RUN: opt -S -passes=instcombine -o - %s | llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -O1 - -o - | %cheri_FileCheck %s --check-prefix ASM diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-dynamic-alloca.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-dynamic-alloca.ll index 25e8a561e4235..7a905d6de0379 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-dynamic-alloca.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-dynamic-alloca.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-bounds-dynamic-alloca.ll ; RUN: opt -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -passes=cheri-bound-allocas -o - -S %s | FileCheck %s ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -O0 %s -o - | FileCheck %s -check-prefix ASM @@ -59,7 +59,7 @@ define i32 @alloca_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounwind ; ASM-OPT-NEXT: .LBB0_2: # %exit ; ASM-OPT-NEXT: li a0, 123 ; ASM-OPT-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@alloca_in_entry +; CHECK-LABEL: define i32 @alloca_in_entry ; CHECK-SAME: (i1 [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [16 x i8], align 16, addrspace(200) @@ -67,8 +67,7 @@ define i32 @alloca_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounwind ; CHECK: do_alloca: ; CHECK-NEXT: br label [[USE_ALLOCA_NO_BOUNDS:%.*]] ; CHECK: use_alloca_no_bounds: -; CHECK-NEXT: [[PTR:%.*]] = bitcast ptr addrspace(200) [[ALLOCA]] to ptr addrspace(200) -; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[PTR]], i64 1 +; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[ALLOCA]], i64 1 ; CHECK-NEXT: store i64 1234, ptr addrspace(200) [[PTR_PLUS_ONE]], align 8 ; CHECK-NEXT: br label [[USE_ALLOCA_NEED_BOUNDS:%.*]] ; CHECK: use_alloca_need_bounds: @@ -87,8 +86,7 @@ do_alloca: ; preds = %entry br label %use_alloca_no_bounds use_alloca_no_bounds: ; preds = %do_alloca - %ptr = bitcast ptr addrspace(200) %alloca to ptr addrspace(200) - %ptr_plus_one = getelementptr i64, ptr addrspace(200) %ptr, i64 1 + %ptr_plus_one = getelementptr i64, ptr addrspace(200) %alloca, i64 1 store i64 1234, ptr addrspace(200) %ptr_plus_one, align 8 br label %use_alloca_need_bounds @@ -164,7 +162,7 @@ define i32 @alloca_not_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounw ; ASM-OPT-NEXT: .LBB1_2: # %exit ; ASM-OPT-NEXT: li a0, 123 ; ASM-OPT-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@alloca_not_in_entry +; CHECK-LABEL: define i32 @alloca_not_in_entry ; CHECK-SAME: (i1 [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 [[ARG]], label [[DO_ALLOCA:%.*]], label [[EXIT:%.*]] @@ -173,8 +171,7 @@ define i32 @alloca_not_in_entry(i1 %arg) local_unnamed_addr addrspace(200) nounw ; CHECK-NEXT: [[TMP0:%.*]] = call ptr addrspace(200) @llvm.cheri.bounded.stack.cap.dynamic.i64(ptr addrspace(200) [[ALLOCA]], i64 16) ; CHECK-NEXT: br 
label [[USE_ALLOCA_NO_BOUNDS:%.*]] ; CHECK: use_alloca_no_bounds: -; CHECK-NEXT: [[PTR:%.*]] = bitcast ptr addrspace(200) [[ALLOCA]] to ptr addrspace(200) -; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[PTR]], i64 1 +; CHECK-NEXT: [[PTR_PLUS_ONE:%.*]] = getelementptr i64, ptr addrspace(200) [[ALLOCA]], i64 1 ; CHECK-NEXT: store i64 1234, ptr addrspace(200) [[PTR_PLUS_ONE]], align 8 ; CHECK-NEXT: br label [[USE_ALLOCA_NEED_BOUNDS:%.*]] ; CHECK: use_alloca_need_bounds: @@ -192,8 +189,7 @@ do_alloca: ; preds = %entry br label %use_alloca_no_bounds use_alloca_no_bounds: ; preds = %do_alloca - %ptr = bitcast ptr addrspace(200) %alloca to ptr addrspace(200) - %ptr_plus_one = getelementptr i64, ptr addrspace(200) %ptr, i64 1 + %ptr_plus_one = getelementptr i64, ptr addrspace(200) %alloca, i64 1 store i64 1234, ptr addrspace(200) %ptr_plus_one, align 8 br label %use_alloca_need_bounds @@ -266,7 +262,7 @@ define i32 @crash_reproducer(i1 %arg) local_unnamed_addr addrspace(200) nounwind ; ASM-OPT-NEXT: cincoffset csp, csp, 32 ; ASM-OPT-NEXT: cret ; ASM-OPT-NEXT: .LBB2_2: # %entry.while.end_crit_edge -; CHECK-LABEL: define {{[^@]+}}@crash_reproducer +; CHECK-LABEL: define i32 @crash_reproducer ; CHECK-SAME: (i1 [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 [[ARG]], label [[ENTRY_WHILE_END_CRIT_EDGE:%.*]], label [[WHILE_BODY:%.*]] diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-opaque-spill-too-early.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-opaque-spill-too-early.ll index ffa73b700152d..43618c83eb198 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-opaque-spill-too-early.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-opaque-spill-too-early.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-bounds-opaque-spill-too-early.ll ;; After merging to LLVM 15 the stack bounds pass the switch to opqaue pointers caused ;; miscompilations in the stack bounding pass (the unbounded value was used instead of @@ -48,7 +48,7 @@ define dso_local void @lazy_bind_args() addrspace(200) nounwind { ; ASM-NEXT: clc cra, 16(csp) # 16-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 32 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@lazy_bind_args +; CHECK-LABEL: define dso_local void @lazy_bind_args ; CHECK-SAME: () addrspace(200) #[[ATTR2:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CAP:%.*]] = alloca ptr addrspace(200), align 16, addrspace(200) diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-pass-phi.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-pass-phi.ll index e2e9005c377e4..b9901e1714dec 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-pass-phi.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-bounds-pass-phi.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-bounds-pass-phi.ll ; REQUIRES: asserts ; RUN: opt -mtriple=riscv64 --relocation-model=pic -target-abi 
l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -passes=cheri-bound-allocas %s -o - -S -cheri-stack-bounds=if-needed \ @@ -48,7 +48,7 @@ define void @test_phi(i1 %cond) addrspace(200) nounwind { ; ASM-NEXT: clc cs0, 16(csp) # 16-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 48 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@test_phi +; CHECK-LABEL: define void @test_phi ; CHECK-SAME: (i1 [[COND:%.*]]) addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ALLOCA1:%.*]] = alloca i32, align 4, addrspace(200) @@ -123,7 +123,7 @@ define void @test_only_created_in_predecessor_block(i1 %cond) addrspace(200) nou ; ASM-NEXT: clc cra, 16(csp) # 16-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 32 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@test_only_created_in_predecessor_block +; CHECK-LABEL: define void @test_only_created_in_predecessor_block ; CHECK-SAME: (i1 [[COND:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ALLOCA1:%.*]] = alloca i32, align 4, addrspace(200) diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-spill-unnecessary.c.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-spill-unnecessary.c.ll index a460ef216d1ee..8676442a2e806 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-spill-unnecessary.c.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/stack-spill-unnecessary.c.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stack-spill-unnecessary.c.ll ; The new CheriBoundedStackPseudo instruction lets us pretend that the incoffset+csetbounds ; is a single trivially rematerizable instruction so it can freely move it around to avoid stack spills. 
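
The intrinsics.ll and ptrtoint.ll hunks above all replace CToPtr with a cgettag/neg/and sequence. A rough, illustrative C model of that lowering (not part of the patch; cap_tag and cap_addr stand in for the CGetTag and address of the untrusted capability, and note that the authorizing capability operand is no longer consulted):

#include <stdbool.h>
#include <stdint.h>

/* llvm.cheri.cap.to.pointer(auth, cap) now lowers to the address of cap
 * masked by its tag: a tagged capability yields its address, an untagged
 * one yields 0.  Since -(uint64_t)1 is all ones and -(uint64_t)0 is zero,
 * the and below is the branch-free form of "cap_tag ? cap_addr : 0". */
static uint64_t cap_to_pointer(bool cap_tag, uint64_t cap_addr) {
    return cap_addr & -(uint64_t)cap_tag;
}

The same masking is why the HYBRID ptrtoint checks change shape: even ptrtoint of null now materialises cgettag cnull/neg/and instead of a constant, which accounts for the extra instructions in the ptrtoint_null* functions above.
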
diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/stackframe-intrinsics.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/stackframe-intrinsics.ll index 7307af293cb2b..8a58ed16379c3 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/stackframe-intrinsics.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/stackframe-intrinsics.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/stackframe-intrinsics.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d %s -o - < %s | FileCheck %s --check-prefix=PURECAP ; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d | FileCheck %s --check-prefix HYBRID diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/strcpy-to-memcpy-no-tags.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/strcpy-to-memcpy-no-tags.ll index 8fb4417cfc8e1..cabca11ccde31 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/strcpy-to-memcpy-no-tags.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/strcpy-to-memcpy-no-tags.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/strcpy-to-memcpy-no-tags.ll ; Check that we can inline the loads/stores generated when simplifiying ; string libcalls to memcpy() (since it should be marked as non-tag-preserving). 
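
Likewise, every cfromptr in the checks (cap-from-ptr.ll and intrinsics.ll above, unaligned-loads-stores-hybrid.ll below) becomes a branch: a zero offset produces the canonical null capability, and any other offset is written into the authorizing capability with CSetAddr. A minimal C model of that semantics (illustrative only; the cap_t layout and helper names are assumptions, not CHERI API):

#include <stdbool.h>
#include <stdint.h>

/* Toy capability model: tag, bounds and address only. */
typedef struct { bool tag; uint64_t base, top, addr; } cap_t;

static const cap_t cap_null; /* zero-initialised: untagged, address 0 */

/* Models CSetAddr: keep the tag and bounds, replace the address.
 * (Hardware would additionally untag an unrepresentable result; that
 * detail is left out of this sketch.) */
static cap_t cap_set_addr(cap_t auth, uint64_t addr) {
    auth.addr = addr;
    return auth;
}

/* New expansion of llvm.cheri.cap.from.pointer(auth, offset). */
static cap_t cap_from_pointer(cap_t auth, uint64_t offset) {
    return offset == 0 ? cap_null : cap_set_addr(auth, offset);
}

This is also why the (int_cheri_cap_from_ptr x, 0) -> null and DDC folds in cap-from-ptr.ll are unchanged, while the general case now needs the bnez/cmove/csetaddr diamond seen throughout the updated checks.
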
@@ -34,7 +34,7 @@ define void @test_strcpy_to_memcpy(ptr addrspace(200) align 8 %dst) addrspace(20 ; CHECK-ASM-NEXT: csb zero, 16(ca0) ; CHECK-ASM-NEXT: csd a2, 0(ca0) ; CHECK-ASM-NEXT: cret -; CHECK-IR-LABEL: define {{[^@]+}}@test_strcpy_to_memcpy +; CHECK-IR-LABEL: define void @test_strcpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 8 [[DST:%.*]]) addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) [[DST]], ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) @str, i64 17, i1 false) #[[ATTR4:[0-9]+]] @@ -61,7 +61,7 @@ define void @test_stpcpy_to_memcpy(ptr addrspace(200) align 8 %dst) addrspace(20 ; CHECK-ASM-NEXT: csb zero, 16(ca0) ; CHECK-ASM-NEXT: csd a2, 0(ca0) ; CHECK-ASM-NEXT: cret -; CHECK-IR-LABEL: define {{[^@]+}}@test_stpcpy_to_memcpy +; CHECK-IR-LABEL: define void @test_stpcpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 8 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) [[DST]], ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) @str, i64 17, i1 false) #[[ATTR5:[0-9]+]] @@ -90,7 +90,7 @@ define void @test_strcat_to_memcpy(ptr addrspace(200) align 8 %dst) addrspace(20 ; CHECK-ASM-NEXT: clc cs0, 0(csp) # 16-byte Folded Reload ; CHECK-ASM-NEXT: cincoffset csp, csp, 32 ; CHECK-ASM-NEXT: cret -; CHECK-IR-LABEL: define {{[^@]+}}@test_strcat_to_memcpy +; CHECK-IR-LABEL: define void @test_strcat_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 8 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: [[STRLEN:%.*]] = call i64 @strlen(ptr addrspace(200) noundef nonnull dereferenceable(1) [[DST]]) @@ -120,7 +120,7 @@ define void @test_strncpy_to_memcpy(ptr addrspace(200) align 8 %dst) addrspace(2 ; CHECK-ASM-NEXT: csb zero, 16(ca0) ; CHECK-ASM-NEXT: csd a2, 0(ca0) ; CHECK-ASM-NEXT: cret -; CHECK-IR-LABEL: define {{[^@]+}}@test_strncpy_to_memcpy +; CHECK-IR-LABEL: define void @test_strncpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 8 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) [[DST]], ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) @str, i64 17, i1 false) #[[ATTR4]] @@ -148,7 +148,7 @@ define void @test_stpncpy_to_memcpy(ptr addrspace(200) align 8 %dst) addrspace(2 ; CHECK-ASM-NEXT: csb zero, 16(ca0) ; CHECK-ASM-NEXT: csd a2, 0(ca0) ; CHECK-ASM-NEXT: cret -; CHECK-IR-LABEL: define {{[^@]+}}@test_stpncpy_to_memcpy +; CHECK-IR-LABEL: define void @test_stpncpy_to_memcpy ; CHECK-IR-SAME: (ptr addrspace(200) align 8 [[DST:%.*]]) addrspace(200) #[[ATTR1]] { ; CHECK-IR-NEXT: entry: ; CHECK-IR-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) [[DST]], ptr addrspace(200) noundef nonnull align 8 dereferenceable(17) @str, i64 17, i1 false) #[[ATTR4]] diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/subobject-bounds-redundant-setbounds.c.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/subobject-bounds-redundant-setbounds.c.ll index 8d1a1fa9a67bd..b853c3b7aa7ba 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/subobject-bounds-redundant-setbounds.c.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/subobject-bounds-redundant-setbounds.c.ll @@ -1,4 +1,4 @@ -; 
NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/subobject-bounds-redundant-setbounds.c.ll ; REQUIRES: asserts ; RUN: rm -f %t.dbg-opt %t.dbg-llc @@ -40,7 +40,7 @@ define void @use_inline(ptr addrspace(200) nocapture %arg) local_unnamed_addr ad ; ASM-NEXT: li a1, 2 ; ASM-NEXT: csw a1, 0(ca0) ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@use_inline +; CHECK-LABEL: define void @use_inline ; CHECK-SAME: (ptr addrspace(200) nocapture [[ARG:%.*]]) local_unnamed_addr addrspace(200) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: store i32 2, ptr addrspace(200) [[ARG]], align 4 ; CHECK-NEXT: ret void @@ -63,7 +63,7 @@ define signext i32 @stack_array() local_unnamed_addr addrspace(200) nounwind { ; ASM-NEXT: clc cs0, 48(csp) # 16-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 80 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@stack_array +; CHECK-LABEL: define signext i32 @stack_array ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: [[ARRAY:%.*]] = alloca [10 x i32], align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.lifetime.start.p200(i64 40, ptr addrspace(200) nonnull [[ARRAY]]) @@ -113,7 +113,7 @@ define signext i32 @stack_int() local_unnamed_addr addrspace(200) nounwind { ; ASM-NEXT: clc cra, 16(csp) # 16-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 32 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@stack_int +; CHECK-LABEL: define signext i32 @stack_int ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.lifetime.start.p200(i64 4, ptr addrspace(200) nonnull [[VALUE]]) @@ -160,7 +160,7 @@ define signext i32 @stack_int_inlined() local_unnamed_addr addrspace(200) nounwi ; ASM-NEXT: clw a0, 12(csp) ; ASM-NEXT: cincoffset csp, csp, 16 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@stack_int_inlined +; CHECK-LABEL: define signext i32 @stack_int_inlined ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.lifetime.start.p200(i64 4, ptr addrspace(200) nonnull [[VALUE]]) @@ -206,7 +206,7 @@ define signext i32 @out_of_bounds_setbounds() local_unnamed_addr addrspace(200) ; ASM-NEXT: clw a0, 12(csp) ; ASM-NEXT: cincoffset csp, csp, 16 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@out_of_bounds_setbounds +; CHECK-LABEL: define signext i32 @out_of_bounds_setbounds ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: [[TMP1:%.*]] = call ptr addrspace(200) @llvm.cheri.bounded.stack.cap.i64(ptr addrspace(200) [[VALUE]], i64 4) @@ -246,7 +246,7 @@ define signext i32 @setbounds_escapes() local_unnamed_addr addrspace(200) nounwi ; ASM-NEXT: clc cra, 16(csp) # 16-byte Folded Reload ; ASM-NEXT: cincoffset csp, csp, 32 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@setbounds_escapes +; CHECK-LABEL: define signext i32 @setbounds_escapes ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[VALUE:%.*]] = alloca i32, align 4, addrspace(200) ; CHECK-NEXT: [[ADDRESS_WITH_BOUNDS:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.bounds.set.i64(ptr addrspace(200) nonnull [[VALUE]], i64 4) @@ -281,7 
+281,7 @@ define void @assume_aligned() local_unnamed_addr addrspace(200) nounwind { ; ASM-NEXT: csw a0, 12(csp) ; ASM-NEXT: cincoffset csp, csp, 16 ; ASM-NEXT: cret -; CHECK-LABEL: define {{[^@]+}}@assume_aligned +; CHECK-LABEL: define void @assume_aligned ; CHECK-SAME: () local_unnamed_addr addrspace(200) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = alloca [4 x i8], align 4, addrspace(200) ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr addrspace(200) [[TMP1]], i64 4) ] diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/trunc-load.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/trunc-load.ll index 6e40eef4b5578..4548456cf5166 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/trunc-load.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/trunc-load.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/trunc-load.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d %s -o - < %s | FileCheck %s --check-prefix=PURECAP ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -o - < %s | FileCheck %s --check-prefix=HYBRID diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/unaligned-loads-stores-hybrid.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/unaligned-loads-stores-hybrid.ll index 55f12e2b16034..08a047319ab3a 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/unaligned-loads-stores-hybrid.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/unaligned-loads-stores-hybrid.ll @@ -13,7 +13,14 @@ define i64 @load_global_i64_align_1(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi0: ; CHECK-NEXT: auipc a0, %got_pcrel_hi(a1) ; CHECK-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi0)(a0) -; CHECK-NEXT: cfromptr ca0, ddc, a0 +; CHECK-NEXT: cspecialr ca1, ddc +; CHECK-NEXT: bnez a0, .LBB0_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca0, cnull +; CHECK-NEXT: j .LBB0_3 +; CHECK-NEXT: .LBB0_2: +; CHECK-NEXT: csetaddr ca0, ca1, a0 +; CHECK-NEXT: .LBB0_3: ; CHECK-NEXT: lbu.cap a1, (ca0) ; CHECK-NEXT: cincoffset ca2, ca0, 1 ; CHECK-NEXT: cincoffset ca3, ca0, 2 @@ -54,7 +61,14 @@ define i64 @load_global_i64_align_2(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi1: ; CHECK-NEXT: auipc a0, %got_pcrel_hi(a2) ; CHECK-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi1)(a0) -; CHECK-NEXT: cfromptr ca0, ddc, a0 +; CHECK-NEXT: cspecialr ca1, ddc +; CHECK-NEXT: bnez a0, .LBB1_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca0, cnull +; CHECK-NEXT: j .LBB1_3 +; CHECK-NEXT: .LBB1_2: +; CHECK-NEXT: csetaddr ca0, ca1, a0 +; CHECK-NEXT: .LBB1_3: ; CHECK-NEXT: lhu.cap a1, (ca0) ; CHECK-NEXT: cincoffset ca2, ca0, 2 ; CHECK-NEXT: cincoffset ca3, ca0, 4 @@ -79,7 +93,14 @@ define i64 @load_global_i64_align_4(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi2: ; CHECK-NEXT: auipc a0, %got_pcrel_hi(a4) ; CHECK-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi2)(a0) -; CHECK-NEXT: cfromptr ca0, ddc, a0 +; CHECK-NEXT: cspecialr ca1, ddc +; CHECK-NEXT: bnez a0, .LBB2_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca0, cnull +; CHECK-NEXT: j .LBB2_3 +; CHECK-NEXT: .LBB2_2: +; CHECK-NEXT: csetaddr ca0, ca1, a0 +; CHECK-NEXT: .LBB2_3: ; CHECK-NEXT: cincoffset ca1, ca0, 4 ; CHECK-NEXT: lwu.cap a1, (ca1) ; CHECK-NEXT: lwu.cap a0, (ca0) @@ -96,7 +117,13 @@ define i64 
@load_global_i64_align_8(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi3: ; CHECK-NEXT: auipc a0, %got_pcrel_hi(a8) ; CHECK-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0) -; CHECK-NEXT: cfromptr ca0, ddc, a0 +; CHECK-NEXT: cspecialr ca1, ddc +; CHECK-NEXT: bnez a0, .LBB3_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: ld.cap a0, (cnull) +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB3_2: +; CHECK-NEXT: csetaddr ca0, ca1, a0 ; CHECK-NEXT: ld.cap a0, (ca0) ; CHECK-NEXT: ret %ret = load i64, i64 addrspace(200)* addrspacecast(i64* @a8 to i64 addrspace(200)*), align 8 @@ -109,29 +136,36 @@ define void @store_global_i64_align_1(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi4: ; CHECK-NEXT: auipc a1, %got_pcrel_hi(a1) ; CHECK-NEXT: ld a1, %pcrel_lo(.Lpcrel_hi4)(a1) -; CHECK-NEXT: srli a2, a0, 56 -; CHECK-NEXT: cfromptr ca1, ddc, a1 -; CHECK-NEXT: cincoffset ca3, ca1, 7 -; CHECK-NEXT: sb.cap a2, (ca3) -; CHECK-NEXT: srli a2, a0, 48 -; CHECK-NEXT: cincoffset ca3, ca1, 6 -; CHECK-NEXT: sb.cap a2, (ca3) -; CHECK-NEXT: srli a2, a0, 40 -; CHECK-NEXT: cincoffset ca3, ca1, 5 -; CHECK-NEXT: sb.cap a2, (ca3) -; CHECK-NEXT: srli a2, a0, 32 -; CHECK-NEXT: cincoffset ca3, ca1, 4 -; CHECK-NEXT: sb.cap a2, (ca3) -; CHECK-NEXT: srli a2, a0, 24 -; CHECK-NEXT: cincoffset ca3, ca1, 3 -; CHECK-NEXT: sb.cap a2, (ca3) -; CHECK-NEXT: srli a2, a0, 16 -; CHECK-NEXT: srli a3, a0, 8 +; CHECK-NEXT: cspecialr ca2, ddc +; CHECK-NEXT: bnez a1, .LBB4_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca1, cnull +; CHECK-NEXT: j .LBB4_3 +; CHECK-NEXT: .LBB4_2: +; CHECK-NEXT: csetaddr ca1, ca2, a1 +; CHECK-NEXT: .LBB4_3: ; CHECK-NEXT: sb.cap a0, (ca1) -; CHECK-NEXT: cincoffset ca0, ca1, 2 +; CHECK-NEXT: cincoffset ca2, ca1, 7 +; CHECK-NEXT: srli a3, a0, 56 +; CHECK-NEXT: sb.cap a3, (ca2) +; CHECK-NEXT: cincoffset ca2, ca1, 6 +; CHECK-NEXT: srli a3, a0, 48 +; CHECK-NEXT: sb.cap a3, (ca2) +; CHECK-NEXT: cincoffset ca2, ca1, 5 +; CHECK-NEXT: srli a3, a0, 40 +; CHECK-NEXT: sb.cap a3, (ca2) +; CHECK-NEXT: cincoffset ca2, ca1, 4 +; CHECK-NEXT: srli a3, a0, 32 +; CHECK-NEXT: sb.cap a3, (ca2) +; CHECK-NEXT: cincoffset ca2, ca1, 3 +; CHECK-NEXT: srli a3, a0, 24 +; CHECK-NEXT: sb.cap a3, (ca2) +; CHECK-NEXT: cincoffset ca2, ca1, 2 +; CHECK-NEXT: srli a3, a0, 16 ; CHECK-NEXT: cincoffset ca1, ca1, 1 -; CHECK-NEXT: sb.cap a2, (ca0) -; CHECK-NEXT: sb.cap a3, (ca1) +; CHECK-NEXT: srli a0, a0, 8 +; CHECK-NEXT: sb.cap a3, (ca2) +; CHECK-NEXT: sb.cap a0, (ca1) ; CHECK-NEXT: ret store i64 %y, i64 addrspace(200)* addrspacecast(i64* @a1 to i64 addrspace(200)*), align 1 ret void @@ -143,17 +177,24 @@ define void @store_global_i64_align_2(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi5: ; CHECK-NEXT: auipc a1, %got_pcrel_hi(a2) ; CHECK-NEXT: ld a1, %pcrel_lo(.Lpcrel_hi5)(a1) -; CHECK-NEXT: srli a2, a0, 48 -; CHECK-NEXT: cfromptr ca1, ddc, a1 -; CHECK-NEXT: cincoffset ca3, ca1, 6 -; CHECK-NEXT: sh.cap a2, (ca3) -; CHECK-NEXT: srli a2, a0, 32 -; CHECK-NEXT: srli a3, a0, 16 +; CHECK-NEXT: cspecialr ca2, ddc +; CHECK-NEXT: bnez a1, .LBB5_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca1, cnull +; CHECK-NEXT: j .LBB5_3 +; CHECK-NEXT: .LBB5_2: +; CHECK-NEXT: csetaddr ca1, ca2, a1 +; CHECK-NEXT: .LBB5_3: ; CHECK-NEXT: sh.cap a0, (ca1) -; CHECK-NEXT: cincoffset ca0, ca1, 4 +; CHECK-NEXT: cincoffset ca2, ca1, 6 +; CHECK-NEXT: srli a3, a0, 48 +; CHECK-NEXT: sh.cap a3, (ca2) +; CHECK-NEXT: cincoffset ca2, ca1, 4 +; CHECK-NEXT: srli a3, a0, 32 ; CHECK-NEXT: cincoffset ca1, ca1, 2 -; CHECK-NEXT: sh.cap a2, (ca0) -; CHECK-NEXT: sh.cap a3, (ca1) +; 
CHECK-NEXT: srli a0, a0, 16 +; CHECK-NEXT: sh.cap a3, (ca2) +; CHECK-NEXT: sh.cap a0, (ca1) ; CHECK-NEXT: ret store i64 %y, i64 addrspace(200)* addrspacecast(i64* @a2 to i64 addrspace(200)*), align 2 ret void @@ -165,10 +206,17 @@ define void @store_global_i64_align_4(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi6: ; CHECK-NEXT: auipc a1, %got_pcrel_hi(a4) ; CHECK-NEXT: ld a1, %pcrel_lo(.Lpcrel_hi6)(a1) -; CHECK-NEXT: cfromptr ca1, ddc, a1 +; CHECK-NEXT: cspecialr ca2, ddc +; CHECK-NEXT: bnez a1, .LBB6_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: cmove ca1, cnull +; CHECK-NEXT: j .LBB6_3 +; CHECK-NEXT: .LBB6_2: +; CHECK-NEXT: csetaddr ca1, ca2, a1 +; CHECK-NEXT: .LBB6_3: ; CHECK-NEXT: sw.cap a0, (ca1) -; CHECK-NEXT: srli a0, a0, 32 ; CHECK-NEXT: cincoffset ca1, ca1, 4 +; CHECK-NEXT: srli a0, a0, 32 ; CHECK-NEXT: sw.cap a0, (ca1) ; CHECK-NEXT: ret store i64 %y, i64 addrspace(200)* addrspacecast(i64* @a4 to i64 addrspace(200)*), align 4 @@ -181,7 +229,13 @@ define void @store_global_i64_align_8(i64 %y) addrspace(200) nounwind { ; CHECK-NEXT: .Lpcrel_hi7: ; CHECK-NEXT: auipc a1, %got_pcrel_hi(a8) ; CHECK-NEXT: ld a1, %pcrel_lo(.Lpcrel_hi7)(a1) -; CHECK-NEXT: cfromptr ca1, ddc, a1 +; CHECK-NEXT: cspecialr ca2, ddc +; CHECK-NEXT: bnez a1, .LBB7_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: sd.cap a0, (cnull) +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB7_2: +; CHECK-NEXT: csetaddr ca1, ca2, a1 ; CHECK-NEXT: sd.cap a0, (ca1) ; CHECK-NEXT: ret store i64 %y, i64 addrspace(200)* addrspacecast(i64* @a8 to i64 addrspace(200)*), align 8 diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/unaligned-loads-stores-purecap.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/unaligned-loads-stores-purecap.ll index aa3fec2b58c7d..a1379eddabfed 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/unaligned-loads-stores-purecap.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/unaligned-loads-stores-purecap.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --version 2 ; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/unaligned-loads-stores-purecap.ll ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d %s -o - | FileCheck %s diff --git a/llvm/test/CodeGen/RISCV/cheri/hoist-setbounds.mir b/llvm/test/CodeGen/RISCV/cheri/hoist-setbounds.mir index 5b384ccfa7002..9c1a28dc0e8ca 100644 --- a/llvm/test/CodeGen/RISCV/cheri/hoist-setbounds.mir +++ b/llvm/test/CodeGen/RISCV/cheri/hoist-setbounds.mir @@ -3,15 +3,12 @@ # RUN: %riscv64_cheri_purecap_llc -o - %s -run-pass=early-machinelicm -avoid-speculation=false \ # RUN: -hoist-cheap-insts -machinelicm-ignore-reg-pressure | FileCheck %s --- | - ; ModuleID = '' source_filename = "" target datalayout = "e-m:e-pf200:128:128:128:64-p:64:64-i64:64-i128:128-n64-S128-A200-P200-G200" target triple = "riscv64-unknown-freebsd" - ; Function Attrs: nounwind declare void @call(i32 addrspace(200)*, i32 addrspace(200)*) local_unnamed_addr addrspace(200) #0 - ; Function Attrs: nounwind define void @hoist_alloca_cond(i32 signext %cond) local_unnamed_addr addrspace(200) #0 { entry: %buf1 = alloca [123 x i32], align 4, addrspace(200) @@ -44,7 +41,6 @@ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body } - ; Function Attrs: nounwind readnone willreturn declare i8 addrspace(200)* @llvm.cheri.bounded.stack.cap.i64(i8 addrspace(200)*, i64) 
addrspace(200) #1 attributes #0 = { nounwind "target-features"="+cap-mode,+xcheri" } @@ -107,24 +103,31 @@ body: | ; CHECK-NEXT: [[CSetBoundsExact:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm]], [[ADDI]] ; CHECK-NEXT: [[CSetBoundsImm:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm]], 492 ; CHECK-NEXT: [[CIncOffsetImm1:%[0-9]+]]:gpcr = CIncOffsetImm %stack.0.buf1, 1 + ; CHECK-NEXT: [[CSetBounds1:%[0-9]+]]:gpcr = CSetBounds [[CIncOffsetImm1]], [[ADDI]] + ; CHECK-NEXT: [[CSetBoundsExact1:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm1]], [[ADDI]] + ; CHECK-NEXT: [[CSetBoundsImm1:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm1]], 492 ; CHECK-NEXT: [[ADDI2:%[0-9]+]]:gpr = ADDI $x0, 491 - ; CHECK-NEXT: [[CSetBounds1:%[0-9]+]]:gpcr = CSetBounds [[CIncOffsetImm1]], [[ADDI2]] - ; CHECK-NEXT: [[CSetBoundsExact1:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm1]], [[ADDI2]] - ; CHECK-NEXT: [[CSetBoundsImm1:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm1]], 491 + ; CHECK-NEXT: [[CSetBounds2:%[0-9]+]]:gpcr = CSetBounds [[CIncOffsetImm1]], [[ADDI2]] + ; CHECK-NEXT: [[CSetBoundsExact2:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm1]], [[ADDI2]] + ; CHECK-NEXT: [[CSetBoundsImm2:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm1]], 491 ; CHECK-NEXT: [[ADDI3:%[0-9]+]]:gpr = ADDI $x0, 88 ; CHECK-NEXT: [[CIncOffsetImm2:%[0-9]+]]:gpcr = CIncOffsetImm %stack.1.buf2, 0 - ; CHECK-NEXT: [[CSetBounds2:%[0-9]+]]:gpcr = CSetBounds [[CIncOffsetImm2]], [[ADDI3]] - ; CHECK-NEXT: [[CSetBoundsExact2:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm2]], [[ADDI3]] + ; CHECK-NEXT: [[CSetBounds3:%[0-9]+]]:gpcr = CSetBounds [[CIncOffsetImm2]], [[ADDI3]] + ; CHECK-NEXT: [[CSetBoundsExact3:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm2]], [[ADDI3]] ; CHECK-NEXT: [[CIncOffsetImm3:%[0-9]+]]:gpcr = CIncOffsetImm [[CIncOffsetImm1]], -1 - ; CHECK-NEXT: [[CSetBounds3:%[0-9]+]]:gpcr = CSetBounds [[CIncOffsetImm3]], [[ADDI3]] - ; CHECK-NEXT: [[CSetBoundsExact3:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm3]], [[ADDI3]] - ; CHECK-NEXT: [[CSetBoundsImm2:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm3]], 492 + ; CHECK-NEXT: [[CSetBounds4:%[0-9]+]]:gpcr = CSetBounds [[CIncOffsetImm3]], [[ADDI3]] + ; CHECK-NEXT: [[CSetBoundsExact4:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm3]], [[ADDI3]] + ; CHECK-NEXT: [[CSetBoundsImm3:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm3]], 492 ; CHECK-NEXT: [[CIncOffsetImm4:%[0-9]+]]:gpcr = CIncOffsetImm $c0, 1 ; CHECK-NEXT: [[CIncOffsetImm5:%[0-9]+]]:gpcr = CIncOffsetImm [[CIncOffsetImm4]], -1 + ; CHECK-NEXT: [[CSetBounds5:%[0-9]+]]:gpcr = CSetBounds [[CIncOffsetImm5]], [[ADDI3]] + ; CHECK-NEXT: [[CSetBoundsExact5:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm5]], [[ADDI3]] + ; CHECK-NEXT: [[CSetBoundsImm4:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm5]], 492 ; CHECK-NEXT: [[CSetOffset:%[0-9]+]]:gpcr = CSetOffset [[CIncOffsetImm1]], $x0 - ; CHECK-NEXT: [[CSetBoundsImm3:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm1]], 0 + ; CHECK-NEXT: [[CSetBoundsImm5:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm1]], 0 ; CHECK-NEXT: [[CIncOffsetImm6:%[0-9]+]]:gpcr = CIncOffsetImm %stack.0.buf1, 492 - ; CHECK-NEXT: [[CSetBoundsImm4:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm6]], 0 + ; CHECK-NEXT: [[CSetBoundsImm6:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm6]], 0 + ; CHECK-NEXT: [[CSetBoundsImm7:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm6]], 1 ; CHECK-NEXT: PseudoBR %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1.for.cond.cleanup: @@ -139,15 +142,6 @@ body: | ; CHECK-NEXT: bb.3.if.then: ; CHECK-NEXT: 
successors: %bb.4(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[CSetBounds4:%[0-9]+]]:gpcr = CSetBounds [[CIncOffsetImm1]], [[ADDI]] - ; CHECK-NEXT: [[CSetBoundsExact4:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm1]], [[ADDI]] - ; CHECK-NEXT: [[CSetBoundsImm5:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm1]], 492 - ; CHECK-NEXT: [[CSetBoundsImm6:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm1]], 492 - ; CHECK-NEXT: [[CSetBounds5:%[0-9]+]]:gpcr = CSetBounds [[CIncOffsetImm5]], [[ADDI3]] - ; CHECK-NEXT: [[CSetBoundsExact5:%[0-9]+]]:gpcr = CSetBoundsExact [[CIncOffsetImm5]], [[ADDI3]] - ; CHECK-NEXT: [[CSetBoundsImm7:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm5]], 492 - ; CHECK-NEXT: [[CSetBoundsImm8:%[0-9]+]]:gpcr = CSetBoundsImm [[CIncOffsetImm6]], 1 - ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.4.for.inc: ; CHECK-NEXT: successors: %bb.1(0x04000000), %bb.2(0x7c000000) ; CHECK-NEXT: {{ $}} @@ -172,17 +166,14 @@ body: | successors: %bb.4(0x80000000) %5:gpr = ADDI $x0, 492 %6:gpr = ADDI $x0, 493 - ; These are fine %10:gpcr = CIncOffsetImm %stack.0.buf1, 0 %11:gpcr = CSetBounds %10, %5 %12:gpcr = CSetBoundsExact %10, %5 %13:gpcr = CSetBoundsImm %10, 492 - ; These will trap since the length is too big: %14:gpcr = CSetBounds %10, %5 %15:gpcr = CSetBoundsExact %10, %5 %16:gpcr = CSetBoundsImm %10, 492 - ; plus + 1 -> OOB, so shouldn't be hoisted: %30:gpcr = CIncOffsetImm %stack.0.buf1, 1 %31:gpcr = CSetBounds %30, %5 %32:gpcr = CSetBoundsExact %30, %5 @@ -193,7 +184,6 @@ body: | %37:gpcr = CSetBoundsImm %30, 491 %38:gpcr = CSetBoundsImm %30, 492 ; OOB - ; One more check with a different buffer size %50:gpr = ADDI $x0, 88 %51:gpcr = CIncOffsetImm %stack.1.buf2, 0 %52:gpcr = CSetBounds %51, %50 @@ -201,7 +191,6 @@ body: | %54:gpcr = CSetBounds %51, %50 - ; And some more checks for non-stack values or multiple incoffset %100:gpcr = CIncOffsetImm %stack.0.buf1, 1 %101:gpcr = CIncOffsetImm %100, -1 %102:gpcr = CSetBounds %101, %50 @@ -214,10 +203,8 @@ body: | %113:gpcr = CSetBoundsExact %111, %50 %114:gpcr = CSetBoundsImm %111, 492 - ; Non-CIncOffset instructions are not handled: %120:gpcr = CSetOffset %100, $x0 %121:gpcr = CSetBoundsImm %100, 0 - ; OOB with size zero/one %122:gpcr = CIncOffsetImm %stack.0.buf1, 492 %123:gpcr = CSetBoundsImm %122, 0 %124:gpcr = CSetBoundsImm %122, 1 diff --git a/llvm/test/CodeGen/RISCV/cheri/isav9-cap-from-ptr.ll b/llvm/test/CodeGen/RISCV/cheri/isav9-cap-from-ptr.ll index 4a1e387c6393f..835c0b55624b2 100644 --- a/llvm/test/CodeGen/RISCV/cheri/isav9-cap-from-ptr.ll +++ b/llvm/test/CodeGen/RISCV/cheri/isav9-cap-from-ptr.ll @@ -1,17 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -;; The CFromPtr instruction is not part of ISAv9, check that we emit `x == 0 ? null : csetoffset(auth, x)` instead. -; RUN: %riscv64_cheri_purecap_llc < %s | FileCheck %s --check-prefix=ISAV8 -; RUN: %riscv64_cheri_purecap_llc -mattr=+xcheri-v9-semantics < %s | FileCheck %s --check-prefix=ISAV9 +;; The CFromPtr instruction is not part of ISAv9, check that we emit `x == 0 ? null : csetaddr(auth, x)` instead. +;; Note: Since DDC-offsetting is no longer part of ISAv9, the expansion consistently uses csetaddr and not csetoffset! +; RUN: %riscv64_cheri_purecap_llc < %s | FileCheck %s --check-prefix=ISAV9 ;; (int_cheri_cap_from_ptr auth, x) -> x == 0 ? 
null : csetoffset(auth, x) define dso_local i8 addrspace(200)* @cap_from_ptr(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %cap, i64 %offset) nounwind { -; ISAV8-LABEL: cap_from_ptr: -; ISAV8: # %bb.0: # %entry -; ISAV8-NEXT: cfromptr ca1, ca1, a2 -; ISAV8-NEXT: csc ca1, 0(ca0) -; ISAV8-NEXT: cmove ca0, ca1 -; ISAV8-NEXT: cret -; ; ISAV9-LABEL: cap_from_ptr: ; ISAV9: # %bb.0: # %entry ; ISAV9-NEXT: bnez a2, .LBB0_2 @@ -30,16 +23,8 @@ entry: ret i8 addrspace(200)* %new } -;; (int_cheri_cap_from_ptr ddc, y) -> x == 0 ? null : csetoffset(ddc, x) -;; NB: This is not the same as (inttoptr x), since the explicit intrinsic retains the offsetting semantics. +;; (int_cheri_cap_from_ptr ddc, y) -> x == 0 ? null : csetaddr(ddc, x) define dso_local i8 addrspace(200)* @cap_from_ptr_ddc(i8 addrspace(200)* addrspace(200)* %ptr, i64 %offset) nounwind { -; ISAV8-LABEL: cap_from_ptr_ddc: -; ISAV8: # %bb.0: # %entry -; ISAV8-NEXT: cfromptr ca1, ddc, a1 -; ISAV8-NEXT: csc ca1, 0(ca0) -; ISAV8-NEXT: cmove ca0, ca1 -; ISAV8-NEXT: cret -; ; ISAV9-LABEL: cap_from_ptr_ddc: ; ISAV9: # %bb.0: # %entry ; ISAV9-NEXT: cspecialr ca2, ddc diff --git a/llvm/test/CodeGen/RISCV/cheri/isav9-cap-to-ptr.ll b/llvm/test/CodeGen/RISCV/cheri/isav9-cap-to-ptr.ll index 9ac8cf7262ce6..0d4433617af18 100644 --- a/llvm/test/CodeGen/RISCV/cheri/isav9-cap-to-ptr.ll +++ b/llvm/test/CodeGen/RISCV/cheri/isav9-cap-to-ptr.ll @@ -1,18 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ;; The CToPtr instruction is not part of ISAv9, check that we emit the ;; optimized form of `x.tag ? x.addr : 0` instead. -; RUN: %riscv64_cheri_purecap_llc < %s | FileCheck %s --check-prefix=ISAV8 -; RUN: %riscv64_cheri_purecap_llc -mattr=+xcheri-v9-semantics < %s | FileCheck %s --check-prefix=ISAV9 +; RUN: %riscv64_cheri_purecap_llc < %s | FileCheck %s --check-prefix=ISAV9 ;; (int_cheri_cap_from_ptr x, y) -> y == 0 ? null : csetaddr(x, y) define dso_local i64 @cap_to_ptr(i64 addrspace(200)* %dst, i8 addrspace(200)* %auth, i8 addrspace(200)* %cap) nounwind { -; ISAV8-LABEL: cap_to_ptr: -; ISAV8: # %bb.0: # %entry -; ISAV8-NEXT: ctoptr a1, ca1, ca2 -; ISAV8-NEXT: csd a1, 0(ca0) -; ISAV8-NEXT: mv a0, a1 -; ISAV8-NEXT: cret -; ; ISAV9-LABEL: cap_to_ptr: ; ISAV9: # %bb.0: # %entry ; ISAV9-NEXT: cgettag a1, ca2 @@ -28,13 +20,6 @@ entry: } define dso_local i64 @cap_from_ptr_ddc(i64 addrspace(200)* %dst, i8 addrspace(200)* %cap) nounwind { -; ISAV8-LABEL: cap_from_ptr_ddc: -; ISAV8: # %bb.0: # %entry -; ISAV8-NEXT: ctoptr a1, ca1, ddc -; ISAV8-NEXT: csd a1, 0(ca0) -; ISAV8-NEXT: mv a0, a1 -; ISAV8-NEXT: cret -; ; ISAV9-LABEL: cap_from_ptr_ddc: ; ISAV9: # %bb.0: # %entry ; ISAV9-NEXT: cgettag a2, ca1 diff --git a/llvm/test/CodeGen/RISCV/cheri/isav9-inttoptr.ll b/llvm/test/CodeGen/RISCV/cheri/isav9-inttoptr.ll index b3f4be08136ac..99dd40babc6ce 100644 --- a/llvm/test/CodeGen/RISCV/cheri/isav9-inttoptr.ll +++ b/llvm/test/CodeGen/RISCV/cheri/isav9-inttoptr.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ;; Check that we don't emit CFromPtr when lowering inttoptr for ISAv9. 
-; RUN: %riscv64_cheri_purecap_llc -mattr=+xcheri-v9-semantics < %s | FileCheck %s --check-prefix=PURECAP -; RUN: %riscv64_cheri_llc -mattr=+xcheri-v9-semantics < %s | FileCheck %s --check-prefix=HYBRID +; RUN: %riscv64_cheri_purecap_llc < %s | FileCheck %s --check-prefix=PURECAP +; RUN: %riscv64_cheri_llc < %s | FileCheck %s --check-prefix=HYBRID define dso_local i8 addrspace(200)* @inttoptr(i64 %ptr) addrspace(200) nounwind { ; PURECAP-LABEL: inttoptr: diff --git a/llvm/test/CodeGen/RISCV/cheri/isav9-ptrtoint.ll b/llvm/test/CodeGen/RISCV/cheri/isav9-ptrtoint.ll index a4fa890829524..085760b7d17f7 100644 --- a/llvm/test/CodeGen/RISCV/cheri/isav9-ptrtoint.ll +++ b/llvm/test/CodeGen/RISCV/cheri/isav9-ptrtoint.ll @@ -2,8 +2,8 @@ ;; Check that we do not emit CToPtr in ISAv9 hybrid mode. Since there is no DDC ;; offsetting, hybrid mode can expand ptrtoint to to a branchless version of ;; `x.tag ? x.addr : 0` -; RUN: %riscv64_cheri_purecap_llc -mattr=+xcheri-v9-semantics < %s | FileCheck %s --check-prefix=PURECAP -; RUN: %riscv64_cheri_llc -mattr=+xcheri-v9-semantics < %s | FileCheck %s --check-prefix=HYBRID +; RUN: %riscv64_cheri_purecap_llc < %s | FileCheck %s --check-prefix=PURECAP +; RUN: %riscv64_cheri_llc < %s | FileCheck %s --check-prefix=HYBRID define dso_local i64 @ptrtoint(i8 addrspace(200)* %cap) addrspace(200) nounwind { ; PURECAP-LABEL: ptrtoint: diff --git a/llvm/test/CodeGen/RISCV/cheri/machinelicm-hoist.mir b/llvm/test/CodeGen/RISCV/cheri/machinelicm-hoist.mir index eb59450a7d8a8..a29c3b352229d 100644 --- a/llvm/test/CodeGen/RISCV/cheri/machinelicm-hoist.mir +++ b/llvm/test/CodeGen/RISCV/cheri/machinelicm-hoist.mir @@ -2,11 +2,8 @@ ## Check that MachineLICM doesn't hoist a instructions that could potentially trap but are not guaranteed to execute ## See also llvm/test/CodeGen/MIPS/machinelicm-hoist.mir for CHERI-MIPS ## See https://github.com/CTSRD-CHERI/llvm-project/issues/348 and https://github.com/CTSRD-CHERI/llvm-project/issues/359 -## TODO: Currently, we default to the v8 (trapping) semantics, but in the future this will change. 
-# RUN: %riscv64_cheri_purecap_llc -o - %s -run-pass=early-machinelicm -avoid-speculation=false \ -# RUN: -hoist-cheap-insts -machinelicm-ignore-reg-pressure | FileCheck %s # RUN: %riscv64_cheri_purecap_llc -mtriple=riscv64 -target-abi l64pc128 -o - %s -run-pass=early-machinelicm -avoid-speculation=false \ -# RUN: -hoist-cheap-insts -machinelicm-ignore-reg-pressure --mattr=+xcheri-v9-semantics | FileCheck %s --check-prefix=TAG-CLEAR +# RUN: -hoist-cheap-insts -machinelicm-ignore-reg-pressure | FileCheck %s --check-prefix=TAG-CLEAR --- | source_filename = "/Users/alex/cheri/llvm-project/llvm/test/CodeGen/CHERI-Generic/test.ll" target datalayout = "e-m:e-pf200:128:128:128:64-p:64:64-i64:64-i128:128-n64-S128-A200-P200-G200" @@ -233,8 +230,6 @@ body: | ; TAG-CLEAR-NEXT: [[CCSeal:%[0-9]+]]:gpcr = CCSeal [[COPY1]], [[COPY2]] ; TAG-CLEAR-NEXT: [[CSealEntry:%[0-9]+]]:gpcr = CSealEntry [[COPY1]] ; TAG-CLEAR-NEXT: [[CSetHigh:%[0-9]+]]:gpcr = CSetHigh [[COPY1]], [[COPY]] - ; TAG-CLEAR-NEXT: [[CToPtr:%[0-9]+]]:gpr = CToPtr [[COPY1]], [[COPY3]] - ; TAG-CLEAR-NEXT: [[CFromPtr:%[0-9]+]]:gpcr = CFromPtr [[COPY3]], [[COPY]] ; TAG-CLEAR-NEXT: [[CMove:%[0-9]+]]:gpcr = CMove [[COPY1]] ; TAG-CLEAR-NEXT: [[CTestSubset:%[0-9]+]]:gpr = CTestSubset [[COPY3]], [[COPY2]] ; TAG-CLEAR-NEXT: [[CRRL:%[0-9]+]]:gpr = CRRL [[COPY]] @@ -319,10 +314,8 @@ body: | %1:gpcr = COPY $c11 %2:gpcr = COPY $c12 %3:gpcrc0isddc = COPY $c12 - ; %4/%5 should be known to be unsealed (since CIncOffset succeeded) %4:gpcr = CIncOffsetImm %1, 4 %5:gpcrc0isddc = COPY %4 - ; %6/%7, should be known to be tagged and unsealed %6:gpcr = CAndPerm %1, %0 %7:gpcrc0isddc = COPY %6 @@ -351,7 +344,7 @@ body: | %16:gpr = CGetOffset %2 %17:gpr = PseudoCGetAddr %2 %18:gpr = CGetHigh %2 - ; However, mutators can't since the input could be sealed/untagged/other constraints + ; Mutators can also be hoisted since we default to non-trapping %30:gpcr = CSeal %1, %2 %31:gpcr = CUnseal %1, %2 %32:gpcr = CAndPerm %1, %0 @@ -371,8 +364,6 @@ body: | %46:gpcr = CSetHigh %1, %0 ; Pointer-arithmetic instructions - %60:gpr = CToPtr %1, %3 ; can trap - %61:gpcr = CFromPtr %3, %0 ; can trap %63:gpcr = CMove %1 ; never traps ; Assertion Instructions diff --git a/llvm/test/MC/RISCV/cheri/rv32xcheri-valid.s b/llvm/test/MC/RISCV/cheri/rv32xcheri-valid.s index a2e288bda463b..49e53f03fb36e 100644 --- a/llvm/test/MC/RISCV/cheri/rv32xcheri-valid.s +++ b/llvm/test/MC/RISCV/cheri/rv32xcheri-valid.s @@ -101,18 +101,6 @@ ccseal c1, c2, c3 # CHECK: encoding: [0xdb,0x00,0x11,0xff] csealentry c1, c2 -# CHECK-INST: ctoptr ra, csp, cgp -# CHECK: encoding: [0xdb,0x00,0x31,0x24] -ctoptr x1, c2, c3 -# CHECK-INST: ctoptr ra, csp, ddc -# CHECK: encoding: [0xdb,0x00,0x01,0x24] -ctoptr x1, c2, ddc -# CHECK-INST: cfromptr cra, csp, gp -# CHECK: encoding: [0xdb,0x00,0x31,0x26] -cfromptr c1, c2, x3 -# CHECK-INST: cfromptr cra, ddc, gp -# CHECK: encoding: [0xdb,0x00,0x30,0x26] -cfromptr c1, ddc, x3 # CHECK-INST: sub ra, sp, gp # CHECK: encoding: [0xb3,0x00,0x31,0x40] csub x1, c2, c3
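A brief note on what the updated checks above encode (a reviewer's summary, not text from the patch): with cfromptr and ctoptr gone from ISAv9, the int_cheri_cap_from_ptr intrinsic is expanded to the branchy `x == 0 ? null : csetaddr(auth, x)` (csetaddr rather than csetoffset, since DDC offsetting is also gone), while the cap-to-ptr and hybrid ptrtoint paths use the branchless `x.tag ? x.addr : 0`. The plain-C sketch below only models those two expansions so the intent of the new CHECK lines is easy to follow; cap_t and its fields are hypothetical stand-ins for a capability register, not part of any CHERI compiler API.

/* Semantic model only -- not compiler code. "cap_t" is a made-up struct that
 * mimics the (tag, address) parts of a capability for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    bool tag;      /* validity tag */
    uint64_t addr; /* address field */
} cap_t;

/* cap_from_ptr(auth, x): x == 0 ? null : csetaddr(auth, x)
 * Mirrors the bnez / cmove-cnull / csetaddr sequences in the updated RISC-V
 * checks; csetoffset is not used because ISAv9 has no DDC offsetting. */
static cap_t cap_from_ptr(cap_t auth, uint64_t x) {
    cap_t null_cap = {false, 0};
    if (x == 0)
        return null_cap;   /* cmove cN, cnull */
    auth.addr = x;         /* csetaddr cN, c_auth, x */
    return auth;
}

/* cap_to_ptr(x) / hybrid ptrtoint: x.tag ? x.addr : 0, done branchlessly
 * (a cgettag result feeding a mask, as in the isav9-cap-to-ptr.ll checks). */
static uint64_t cap_to_ptr(cap_t x) {
    uint64_t mask = (uint64_t)0 - (uint64_t)x.tag; /* all-ones iff tagged */
    return x.addr & mask;
}

int main(void) {
    cap_t ddc = {true, 0};
    cap_t c = cap_from_ptr(ddc, 0x1234);
    printf("tag=%d addr=0x%llx\n", (int)c.tag, (unsigned long long)cap_to_ptr(c));
    printf("null addr=0x%llx\n", (unsigned long long)cap_to_ptr(cap_from_ptr(ddc, 0)));
    return 0;
}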