diff --git a/clang/include/clang/CodeGen/QualTypeMapper.h b/clang/include/clang/CodeGen/QualTypeMapper.h
new file mode 100644
index 0000000000000..81dd074cdad05
--- /dev/null
+++ b/clang/include/clang/CodeGen/QualTypeMapper.h
@@ -0,0 +1,81 @@
+//==---- QualTypeMapper.h - Maps Clang QualType to LLVMABI Types ----------==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Maps Clang QualType instances to corresponding LLVM ABI type
+/// representations. This mapper translates high-level type information from
+/// the AST into low-level ABI-specific types that encode the size, alignment,
+/// and layout details required for code generation and cross-language
+/// interoperability.
+///
+//===----------------------------------------------------------------------===//
+#ifndef CLANG_CODEGEN_QUALTYPE_MAPPER_H
+#define CLANG_CODEGEN_QUALTYPE_MAPPER_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeOrdering.h"
+#include "llvm/ABI/Types.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+
+namespace clang {
+namespace CodeGen {
+
+class QualTypeMapper {
+private:
+  clang::ASTContext &ASTCtx;
+  llvm::abi::TypeBuilder Builder;
+
+  llvm::DenseMap<clang::QualType, const llvm::abi::Type *> TypeCache;
+
+  const llvm::abi::Type *convertBuiltinType(const clang::BuiltinType *BT);
+  const llvm::abi::Type *convertPointerType(const clang::PointerType *PT);
+  const llvm::abi::Type *convertArrayType(const clang::ArrayType *AT);
+  const llvm::abi::Type *convertVectorType(const clang::VectorType *VT);
+  const llvm::abi::Type *convertRecordType(const clang::RecordType *RT);
+  const llvm::abi::Type *convertEnumType(const clang::EnumType *ET);
+  const llvm::abi::Type *convertReferenceType(const ReferenceType *RT);
+  const llvm::abi::Type *convertComplexType(const ComplexType *CT);
+  const llvm::abi::Type *
+  convertMemberPointerType(const clang::MemberPointerType *MPT);
+  const llvm::abi::Type *convertMatrixType(const ConstantMatrixType *MT);
+
+  const llvm::abi::RecordType *convertStructType(const clang::RecordDecl *RD);
+  const llvm::abi::RecordType *convertUnionType(const clang::RecordDecl *RD,
+                                                bool IsTransparent = false);
+  const llvm::abi::Type *createPointerTypeForPointee(QualType PointeeType);
+  const llvm::abi::RecordType *convertCXXRecordType(const CXXRecordDecl *RD,
+                                                    bool CanPassInRegs);
+
+  void computeFieldInfo(const clang::RecordDecl *RD,
+                        SmallVectorImpl<llvm::abi::FieldInfo> &Fields,
+                        const clang::ASTRecordLayout &Layout);
+
+  llvm::TypeSize getTypeSize(clang::QualType QT) const;
+  llvm::Align getTypeAlign(clang::QualType QT) const;
+  llvm::Align getPreferredTypeAlign(QualType QT) const;
+  uint64_t getPointerSize() const;
+  uint64_t getPointerAlign() const;
+
+public:
+  explicit QualTypeMapper(clang::ASTContext &Ctx, llvm::BumpPtrAllocator &Alloc)
+      : ASTCtx(Ctx), Builder(Alloc) {}
+
+  const llvm::abi::Type *convertType(clang::QualType QT);
+
+  void clearCache() { TypeCache.clear(); }
+
+  llvm::abi::TypeBuilder &getTypeBuilder() { return Builder; }
+};
+
+} // namespace CodeGen
+} // namespace clang
+
+#endif // CLANG_CODEGEN_QUALTYPE_MAPPER_H
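// Usage sketch (illustrative, not part of the patch): how a client drives the
// mapper, assuming an ASTContext `Ctx` is in scope. The allocator owns every
// llvm::abi::Type node the builder hands out:
//
//   llvm::BumpPtrAllocator Alloc;
//   clang::CodeGen::QualTypeMapper Mapper(Ctx, Alloc);
//   const llvm::abi::Type *IntTy = Mapper.convertType(Ctx.IntTy);
//   const llvm::abi::Type *PtrTy = Mapper.convertType(Ctx.VoidPtrTy);
//
// Conversions are memoized, so a second convertType(Ctx.IntTy) is a cache
// hit; clearCache() drops the memo table but not the allocations.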
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 5b0dd235b58da..caa286fa85eed 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -30,7 +30,11 @@
 #include "clang/Basic/CodeGenOptions.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/CodeGen/QualTypeMapper.h"
 #include "clang/CodeGen/SwiftCallingConv.h"
+#include "llvm/ABI/ABIFunctionInfo.h"
+#include "llvm/ABI/ABITypeMapper.h"
+#include "llvm/ABI/Types.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Assumptions.h"
@@ -42,6 +46,7 @@
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Type.h"
+#include "llvm/TargetParser/Triple.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include <optional>
 using namespace clang;
@@ -825,6 +830,196 @@ void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
 }
 } // namespace clang
 
+static QualType getIntegerTypeForExtension(ASTContext &Ctx, llvm::Type *LLVMTy,
+                                           bool IsSigned) {
+  if (auto *IntTy = dyn_cast<llvm::IntegerType>(LLVMTy)) {
+    unsigned BitWidth = IntTy->getBitWidth();
+    return Ctx.getIntTypeForBitwidth(BitWidth, IsSigned);
+  }
+  return QualType();
+}
+
+static bool shouldSignExtend(QualType Ty) {
+  if (Ty->isSignedIntegerType() || Ty->isEnumeralType()) {
+    if (const EnumType *ET = Ty->getAs<EnumType>())
+      return ET->getOriginalDecl()->getIntegerType()->isSignedIntegerType();
+    return true;
+  }
+  return false;
+}
+
+ABIArgInfo CodeGenTypes::convertABIArgInfo(const llvm::abi::ABIArgInfo &AbiInfo,
+                                           QualType type) {
+  ABIArgInfo result;
+
+  // Matrix types are lowered to vectors to stay coherent with Clang's
+  // current implementation.
+  if (type->isConstantMatrixType()) {
+    const auto *MT = type->getAs<ConstantMatrixType>();
+    unsigned NumElements = MT->getNumRows() * MT->getNumColumns();
+    llvm::Type *ElementType = ConvertType(MT->getElementType());
+    llvm::Type *VectorType = llvm::VectorType::get(
+        ElementType, llvm::ElementCount::getFixed(NumElements));
+
+    result = ABIArgInfo::getDirect(VectorType);
+    return result;
+  }
+
+  switch (AbiInfo.getKind()) {
+  case llvm::abi::ABIArgInfo::Direct: {
+    llvm::Type *CoercedType = nullptr;
+    if (AbiInfo.getCoerceToType())
+      CoercedType = ReverseMapper.convertType(AbiInfo.getCoerceToType());
+    if (!CoercedType)
+      CoercedType = ConvertType(type);
+
+    llvm::Type *PaddingType = nullptr;
+    if (AbiInfo.getPaddingType())
+      PaddingType = ReverseMapper.convertType(AbiInfo.getPaddingType());
+
+    result = ABIArgInfo::getDirect(CoercedType, AbiInfo.getDirectOffset(),
+                                   PaddingType, AbiInfo.getCanBeFlattened(),
+                                   AbiInfo.getDirectAlign());
+
+    if (AbiInfo.isInReg())
+      result.setInReg(true);
+    if (AbiInfo.hasPaddingInReg())
+      result.setPaddingInReg(true);
+    break;
+  }
+  case llvm::abi::ABIArgInfo::Extend: {
+    llvm::Type *CoercedType = nullptr;
+    QualType ExtendType = type;
+
+    if (AbiInfo.getCoerceToType()) {
+      CoercedType = ReverseMapper.convertType(AbiInfo.getCoerceToType());
+
+      if (CoercedType && CoercedType->isIntegerTy()) {
+        bool IsSigned = AbiInfo.isSignExt() ||
+                        (AbiInfo.isNoExt() && shouldSignExtend(type));
+        ExtendType =
+            getIntegerTypeForExtension(getContext(), CoercedType, IsSigned);
+
+        if (ExtendType.isNull()) {
+          ExtendType = type;
+          if (type->isUnionType() || !type->isIntegralOrEnumerationType()) {
+            unsigned BitWidth =
+                cast<llvm::IntegerType>(CoercedType)->getBitWidth();
+            ExtendType = getContext().getIntTypeForBitwidth(BitWidth, IsSigned);
+          }
+        }
+      }
+    }
+
+    if (!CoercedType)
+      CoercedType = ConvertType(type);
+
+    if (!ExtendType->isIntegralOrEnumerationType()) {
+      if (CoercedType && CoercedType->isIntegerTy()) {
+        unsigned BitWidth = cast<llvm::IntegerType>(CoercedType)->getBitWidth();
+        bool IsSigned = AbiInfo.isSignExt() || shouldSignExtend(type);
+        ExtendType = getContext().getIntTypeForBitwidth(BitWidth, IsSigned);
+      } else {
+        ExtendType = getContext().IntTy;
+      }
+    }
+
+    if (AbiInfo.isSignExt()) {
+      result = ABIArgInfo::getSignExtend(ExtendType, CoercedType);
+    } else if (AbiInfo.isZeroExt()) {
+      result = ABIArgInfo::getZeroExtend(ExtendType, CoercedType);
+    } else {
+      result = ABIArgInfo::getExtend(ExtendType, CoercedType);
+    }
+
+    if (AbiInfo.isInReg())
+      result.setInReg(true);
+    break;
+  }
+  case llvm::abi::ABIArgInfo::Indirect: {
+    CharUnits Alignment = CharUnits::fromQuantity(AbiInfo.getIndirectAlign());
+
+    llvm::Type *PaddingType = nullptr;
+    if (AbiInfo.getPaddingType())
+      PaddingType = ReverseMapper.convertType(AbiInfo.getPaddingType());
+
+    result = ABIArgInfo::getIndirect(Alignment, AbiInfo.getIndirectAddrSpace(),
+                                     AbiInfo.getIndirectByVal(),
+                                     AbiInfo.getIndirectRealign(), PaddingType);
+
+    if (AbiInfo.isInReg())
+      result.setInReg(true);
+    if (AbiInfo.isSRetAfterThis())
+      result.setSRetAfterThis(true);
+    break;
+  }
+  case llvm::abi::ABIArgInfo::IndirectAliased: {
+    CharUnits Alignment = CharUnits::fromQuantity(AbiInfo.getIndirectAlign());
+
+    llvm::Type *PaddingType = nullptr;
+    if (AbiInfo.getPaddingType())
+      PaddingType = ReverseMapper.convertType(AbiInfo.getPaddingType());
+
+    result = ABIArgInfo::getIndirectAliased(
+        Alignment, AbiInfo.getIndirectAddrSpace(), AbiInfo.getIndirectRealign(),
+        PaddingType);
+    break;
+  }
+
+  case llvm::abi::ABIArgInfo::Ignore: {
+    result = ABIArgInfo::getIgnore();
+    break;
+  }
+
+  case llvm::abi::ABIArgInfo::Expand: {
+    llvm::Type *PaddingType = nullptr;
+    if (AbiInfo.getPaddingType())
+      PaddingType = ReverseMapper.convertType(AbiInfo.getPaddingType());
+
+    if (PaddingType) {
+      result = ABIArgInfo::getExpandWithPadding(AbiInfo.hasPaddingInReg(),
+                                                PaddingType);
+    } else {
+      result = ABIArgInfo::getExpand();
+    }
+    break;
+  }
+
+  case llvm::abi::ABIArgInfo::CoerceAndExpand: {
+    llvm::Type *CoerceType = nullptr;
+    llvm::Type *UnpaddedType = nullptr;
+
+    if (AbiInfo.getCoerceToType())
+      CoerceType = ReverseMapper.convertType(AbiInfo.getCoerceToType());
+    if (AbiInfo.getUnpaddedCoerceAndExpandType())
+      UnpaddedType =
+          ReverseMapper.convertType(AbiInfo.getUnpaddedCoerceAndExpandType());
+
+    if (!CoerceType)
+      CoerceType = ConvertType(type);
+    if (!UnpaddedType)
+      UnpaddedType = CoerceType;
+
+    llvm::StructType *CoerceStructType = dyn_cast<llvm::StructType>(CoerceType);
+    if (!CoerceStructType)
+      CoerceStructType = llvm::StructType::get(CoerceType->getContext());
+
+    result = ABIArgInfo::getCoerceAndExpand(CoerceStructType, UnpaddedType);
+    break;
+  }
+
+  case llvm::abi::ABIArgInfo::InAlloca: {
+    result = ABIArgInfo::getInAlloca(AbiInfo.getInAllocaFieldIndex(),
+                                     AbiInfo.getInAllocaIndirect());
+    if (AbiInfo.getInAllocaSRet())
+      result.setInAllocaSRet(true);
+    break;
+  }
+  }
+
+  return result;
+}
+
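// Worked example (illustrative): for a C `short` argument on x86-64, the
// library-side classifier is expected to produce Extend with a signed 16-bit
// coercion type. The Extend case above then rebuilds the clang-side
// equivalent, roughly:
//
//   ABIArgInfo CGInfo = convertABIArgInfo(LibInfo, ShortTy);
//   assert(CGInfo.isSignExt());
//   assert(CGInfo.getCoerceToType()->isIntegerTy(16));
//
// where `LibInfo` and `ShortTy` are stand-ins for the computed
// llvm::abi::ABIArgInfo and the QualType of the argument.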
 /// Arrange the argument and result information for an abstract value
 /// of a given function type.  This is the method which all of the
 /// above functions ultimately defer to.
@@ -857,7 +1052,9 @@ const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
 
   // Construct the function info.  We co-allocate the ArgInfos.
   FI = CGFunctionInfo::create(CC, isInstanceMethod, isChainCall, isDelegateCall,
                               info, paramInfos, resultType, argTypes, required);
   FunctionInfos.InsertNode(FI, insertPos);
+
+  std::unique_ptr<llvm::abi::ABIFunctionInfo> tempFI;
 
   bool inserted = FunctionsBeingProcessed.insert(FI).second;
   (void)inserted;
@@ -871,20 +1068,57 @@ const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
   } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
     swiftcall::computeABIInfo(CGM, *FI);
   } else {
-    CGM.getABIInfo().computeInfo(*FI);
+    if (CGM.shouldUseLLVMABI() &&
+        (CC == llvm::CallingConv::X86_64_SysV || CC == llvm::CallingConv::C)) {
+      SmallVector<const llvm::abi::Type *, 8> MappedArgTypes;
+      for (CanQualType ArgType : argTypes)
+        MappedArgTypes.push_back(Mapper.convertType(ArgType));
+      tempFI.reset(llvm::abi::ABIFunctionInfo::create(
+          CC, Mapper.convertType(resultType), MappedArgTypes));
+
+      CGM.fetchABIInfo(TB).computeInfo(*tempFI);
+    } else
+      CGM.getABIInfo().computeInfo(*FI);
   }
 
   // Loop over all of the computed argument and return value info. If any of
   // them are direct or extend without a specified coerce type, specify the
   // default now.
-  ABIArgInfo &retInfo = FI->getReturnInfo();
-  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
-    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
-
-  for (auto &I : FI->arguments())
-    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
-      I.info.setCoerceToType(ConvertType(I.type));
+  if (CGM.shouldUseLLVMABI() &&
+      (CC == llvm::CallingConv::X86_64_SysV || CC == llvm::CallingConv::C) &&
+      tempFI) {
+    const auto &abiRetInfo = tempFI->getReturnInfo();
+    ABIArgInfo &cgRetInfo = FI->getReturnInfo();
+
+    cgRetInfo = convertABIArgInfo(abiRetInfo, FI->getReturnType());
+
+    unsigned numArgs = std::min(FI->arg_size(), tempFI->getNumArgs());
+    unsigned argIndex = 0;
+
+    for (auto &cgArg : FI->arguments()) {
+      if (argIndex >= numArgs)
+        break;
+
+      const auto &abiArgInfo = tempFI->getArgInfo(argIndex);
+      cgArg.info = convertABIArgInfo(abiArgInfo.ArgInfo, cgArg.type);
+
+      if (abiArgInfo.ArgInfo.isInReg())
+        cgArg.info.setInReg(true);
+
+      argIndex++;
+    }
+  } else {
+    // Legacy path: fill in default coerce types for direct/extend cases.
+    ABIArgInfo &retInfo = FI->getReturnInfo();
+    if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
+      retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
+
+    for (auto &I : FI->arguments()) {
+      if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
+        I.info.setCoerceToType(ConvertType(I.type));
+    }
+  }
 
   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
   assert(erased && "Not in set?");
diff --git a/clang/lib/CodeGen/CMakeLists.txt b/clang/lib/CodeGen/CMakeLists.txt
index ad9ef91c781a8..9e17fcf09f707 100644
--- a/clang/lib/CodeGen/CMakeLists.txt
+++ b/clang/lib/CodeGen/CMakeLists.txt
@@ -1,5 +1,6 @@
 set(LLVM_LINK_COMPONENTS
   AggressiveInstCombine
+  ABI
   Analysis
   BitReader
   BitWriter
@@ -115,6 +116,7 @@ add_clang_library(clangCodeGen
   ModuleBuilder.cpp
   ObjectFilePCHContainerWriter.cpp
   PatternInit.cpp
+  QualTypeMapper.cpp
   SanitizerMetadata.cpp
   SwiftCallingConv.cpp
   TargetBuiltins/ARM.cpp
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 323823c964a79..f165b2146e8df 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -37,6 +37,7 @@
 #include "clang/AST/Mangle.h"
 #include "clang/AST/RecursiveASTVisitor.h"
 #include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/AddressSpaces.h"
"clang/Basic/Builtins.h" #include "clang/Basic/CodeGenOptions.h" #include "clang/Basic/Diagnostic.h" @@ -47,6 +48,7 @@ #include "clang/CodeGen/BackendUtil.h" #include "clang/CodeGen/ConstantInitBuilder.h" #include "clang/Frontend/FrontendDiagnostic.h" +#include "llvm/ABI/TargetCodegenInfo.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringSwitch.h" @@ -72,6 +74,7 @@ #include "llvm/TargetParser/Triple.h" #include "llvm/TargetParser/X86TargetParser.h" #include "llvm/Transforms/Utils/BuildLibCalls.h" +#include #include #include @@ -104,6 +107,64 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) { llvm_unreachable("invalid C++ ABI kind"); } +const llvm::abi::ABIInfo & +CodeGenModule::fetchABIInfo(llvm::abi::TypeBuilder &TB) { + if (getTriple().getArch() == llvm::Triple::x86_64) { + StringRef ABI = Target.getABI(); + llvm::abi::X86AVXABILevel AVXLevel = + (ABI == "avx512" ? llvm::abi::X86AVXABILevel::AVX512 + : ABI == "avx" ? llvm::abi::X86AVXABILevel::AVX + : llvm::abi::X86AVXABILevel::None); + llvm::abi::ABICompatInfo CompatInfo; + bool classifyIntegerMMXAsSSE = [&]() { + if (getContext().getLangOpts().getClangABICompat() <= + LangOptions::ClangABI::Ver3_8) + return false; + + const llvm::Triple &Triple = getTarget().getTriple(); + if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD()) + return false; + return true; + }(); + bool honorsRevision0_98 = !getTarget().getTriple().isOSDarwin(); + + bool passInt128VectorsInMem = [&]() { + if (getContext().getLangOpts().getClangABICompat() <= + LangOptions::ClangABI::Ver9) + return false; + + const llvm::Triple &T = getTarget().getTriple(); + return T.isOSLinux() || T.isOSNetBSD(); + }(); + + bool returnCXXRecordGreaterThan128InMem = [&]() { + if (getContext().getLangOpts().getClangABICompat() <= + LangOptions::ClangABI::Ver20) + return false; + + return true; + }(); + + CompatInfo.Flags.ClassifyIntegerMMXAsSSE = classifyIntegerMMXAsSSE; + CompatInfo.Flags.HonorsRevision98 = honorsRevision0_98; + CompatInfo.Flags.PassInt128VectorsInMem = passInt128VectorsInMem; + CompatInfo.Flags.ReturnCXXRecordGreaterThan128InMem = + returnCXXRecordGreaterThan128InMem; + CompatInfo.Flags.Clang11Compat = + getContext().getLangOpts().getClangABICompat() <= + LangOptions::ClangABI::Ver11 || + getContext().getTargetInfo().getTriple().isPS(); + + newTargetCodeGenInfo = llvm::abi::createX8664TargetCodeGenInfo( + TB, getTriple(), AVXLevel, + getContext().getTargetInfo().getPointerWidth(LangAS::Default) == 64, + CompatInfo); + + } else + newTargetCodeGenInfo = llvm::abi::createBPFTargetCodeGenInfo(TB); + return newTargetCodeGenInfo->getABIInfo(); +} + static std::unique_ptr createTargetCodeGenInfo(CodeGenModule &CGM) { const TargetInfo &Target = CGM.getTarget(); @@ -409,6 +470,11 @@ CodeGenModule::CodeGenModule(ASTContext &C, SanitizerMD(new SanitizerMetadata(*this)), AtomicOpts(Target.getAtomicOpts()) { + // Flag to use the new LLVM ABI Library :) + const llvm::Triple &Triple = Target.getTriple(); + ShouldUseLLVMABI = + Triple.isBPF() || + (Triple.getArch() == llvm::Triple::x86_64 && Triple.isOSLinux()); // Initialize the type cache. 
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index b4b3a17662045..06a510dbac54d 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -31,6 +31,9 @@
 #include "clang/Basic/TargetInfo.h"
 #include "clang/Basic/XRayLists.h"
 #include "clang/Lex/PreprocessorOptions.h"
+#include "llvm/ABI/ABIInfo.h"
+#include "llvm/ABI/TargetCodegenInfo.h"
+#include "llvm/ABI/Types.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SetVector.h"
@@ -345,6 +348,7 @@ class CodeGenModule : public CodeGenTypeCache {
   typedef std::vector<Structor> CtorList;
 
 private:
+  bool ShouldUseLLVMABI;
   ASTContext &Context;
   const LangOptions &LangOpts;
   IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS; // Only used for debug info.
@@ -362,6 +366,7 @@ class CodeGenModule : public CodeGenTypeCache {
   std::unique_ptr<CodeGenTBAA> TBAA;
 
   mutable std::unique_ptr<TargetCodeGenInfo> TheTargetCodeGenInfo;
+  mutable std::unique_ptr<llvm::abi::TargetCodeGenInfo> newTargetCodeGenInfo;
 
   // This should not be moved earlier, since its initialization depends on some
   // of the previous reference members being already initialized and also checks
@@ -693,7 +698,7 @@ class CodeGenModule : public CodeGenTypeCache {
                 CoverageSourceInfo *CoverageInfo = nullptr);
 
   ~CodeGenModule();
-
+  bool shouldUseLLVMABI() const { return ShouldUseLLVMABI; }
   void clear();
 
   /// Finalize LLVM code generation.
@@ -839,6 +844,7 @@ class CodeGenModule : public CodeGenTypeCache {
   void maybeSetTrivialComdat(const Decl &D, llvm::GlobalObject &GO);
 
   const ABIInfo &getABIInfo();
+  const llvm::abi::ABIInfo &fetchABIInfo(llvm::abi::TypeBuilder &TB);
   CGCXXABI &getCXXABI() const { return *ABI; }
   llvm::LLVMContext &getLLVMContext() { return VMContext; }
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index 3ffe999d01178..fd9fa2ed3aabe 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -33,7 +33,8 @@ using namespace CodeGen;
 
 CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
     : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
-      Target(cgm.getTarget()) {
+      Target(cgm.getTarget()), TB(Alloc), Mapper(cgm.getContext(), Alloc),
+      ReverseMapper(getLLVMContext(), getDataLayout()) {
   SkippedLayout = false;
   LongDoubleReferenced = false;
 }
diff --git a/clang/lib/CodeGen/CodeGenTypes.h b/clang/lib/CodeGen/CodeGenTypes.h
index 29f6f1ec80bc3..7eed12e81d991 100644
--- a/clang/lib/CodeGen/CodeGenTypes.h
+++ b/clang/lib/CodeGen/CodeGenTypes.h
@@ -16,6 +16,10 @@
 #include "CGCall.h"
 #include "clang/Basic/ABI.h"
 #include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/CodeGen/QualTypeMapper.h"
+#include "llvm/ABI/ABIFunctionInfo.h"
+#include "llvm/ABI/ABITypeMapper.h"
+#include "llvm/ABI/Types.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/IR/Module.h"
 
@@ -92,6 +96,11 @@ class CodeGenTypes {
   /// Helper for ConvertType.
   llvm::Type *ConvertFunctionTypeInternal(QualType FT);
 
+  mutable llvm::BumpPtrAllocator Alloc;
+  mutable llvm::abi::TypeBuilder TB;
+  mutable QualTypeMapper Mapper;
+  llvm::abi::ABITypeMapper ReverseMapper;
+
 public:
   CodeGenTypes(CodeGenModule &cgm);
   ~CodeGenTypes();
 
@@ -99,6 +108,8 @@ class CodeGenTypes {
   const llvm::DataLayout &getDataLayout() const {
     return TheModule.getDataLayout();
   }
+  llvm::abi::TypeBuilder &getTypeBuilder() { return TB; }
+  clang::CodeGen::QualTypeMapper &getMapper() { return Mapper; }
   CodeGenModule &getCGM() const { return CGM; }
   ASTContext &getContext() const { return Context; }
   const TargetInfo &getTarget() const { return Target; }
@@ -273,6 +284,9 @@ class CodeGenTypes {
                                          const FunctionProtoType *FTP,
                                          const CXXMethodDecl *MD);
 
+  ABIArgInfo convertABIArgInfo(const llvm::abi::ABIArgInfo &abiInfo,
+                               QualType type);
+
   /// "Arrange" the LLVM information for a call or type with the given
   /// signature.  This is largely an internal method; other clients
   /// should use one of the above routines, which ultimately defer to
diff --git a/clang/lib/CodeGen/QualTypeMapper.cpp b/clang/lib/CodeGen/QualTypeMapper.cpp
new file mode 100644
index 0000000000000..e8e23be8868fd
--- /dev/null
+++ b/clang/lib/CodeGen/QualTypeMapper.cpp
@@ -0,0 +1,554 @@
+//==---- QualTypeMapper.cpp - Maps Clang QualType to LLVMABI Types --------==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Maps Clang QualType instances to corresponding LLVM ABI type
+/// representations. This mapper translates high-level type information from
+/// the AST into low-level ABI-specific types that encode the size, alignment,
+/// and layout details required for code generation and cross-language
+/// interoperability.
+///
+//===----------------------------------------------------------------------===//
+#include "clang/CodeGen/QualTypeMapper.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTFwd.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ABI/Types.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/bit.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TypeSize.h"
+#include <algorithm>
+#include <cstdint>
+
+namespace clang {
+namespace CodeGen {
+
+/// Main entry point for converting a Clang QualType to an LLVM ABI Type.
+/// This method performs type canonicalization and caching, and dispatches
+/// to specialized conversion methods based on the type kind.
+///
+/// \param QT The Clang QualType to convert
+/// \return Corresponding LLVM ABI Type representation, or nullptr on error
+const llvm::abi::Type *QualTypeMapper::convertType(QualType QT) {
+  // Canonicalize the type and strip qualifiers. This ensures a consistent
+  // type representation across different contexts.
+  QT = QT.getCanonicalType().getUnqualifiedType();
+
+  // Results are cached since type conversion may be expensive.
+  auto It = TypeCache.find(QT);
+  if (It != TypeCache.end())
+    return It->second;
+
+  const llvm::abi::Type *Result = nullptr;
+  if (const auto *BT = dyn_cast<BuiltinType>(QT.getTypePtr()))
+    Result = convertBuiltinType(BT);
+  else if (const auto *PT = dyn_cast<clang::PointerType>(QT.getTypePtr()))
+    Result = convertPointerType(PT);
+  else if (const auto *RT = dyn_cast<ReferenceType>(QT.getTypePtr()))
+    Result = convertReferenceType(RT);
+  else if (const auto *AT = dyn_cast<clang::ArrayType>(QT.getTypePtr()))
+    Result = convertArrayType(AT);
+  else if (const auto *VT = dyn_cast<clang::VectorType>(QT.getTypePtr()))
+    Result = convertVectorType(VT);
+  else if (const auto *RT = dyn_cast<clang::RecordType>(QT.getTypePtr()))
+    Result = convertRecordType(RT);
+  else if (const auto *ET = dyn_cast<clang::EnumType>(QT.getTypePtr()))
+    Result = convertEnumType(ET);
+  else if (const auto *CT = dyn_cast<ComplexType>(QT.getTypePtr()))
+    Result = convertComplexType(CT);
+  else if (const auto *AT = dyn_cast<AtomicType>(QT.getTypePtr()))
+    return convertType(AT->getValueType());
+  else if (isa<FunctionType>(QT.getTypePtr()))
+    return createPointerTypeForPointee(ASTCtx.VoidPtrTy);
+  else if (isa<BlockPointerType>(QT.getTypePtr()))
+    Result = createPointerTypeForPointee(ASTCtx.VoidPtrTy);
+  else if (const auto *MT = dyn_cast<ConstantMatrixType>(QT.getTypePtr())) {
+    const llvm::abi::Type *ElementType = convertType(MT->getElementType());
+    uint64_t NumElements = MT->getNumRows() * MT->getNumColumns();
+    return Builder.getArrayType(ElementType, NumElements,
+                                ASTCtx.getTypeSize(QT), true);
+  } else if (const auto *MPT =
+                 dyn_cast<clang::MemberPointerType>(QT.getTypePtr()))
+    Result = convertMemberPointerType(MPT);
+  else if (const auto *BIT = dyn_cast<BitIntType>(QT.getTypePtr())) {
+    unsigned RawNumBits = BIT->getNumBits();
+    bool IsSigned = BIT->isSigned();
+    llvm::Align TypeAlign = getTypeAlign(QT);
+    return Builder.getIntegerType(RawNumBits, TypeAlign, IsSigned, true);
+  } else if (isa<ObjCObjectPointerType>(QT.getTypePtr()) ||
+             isa<ObjCObjectType>(QT.getTypePtr())) {
+    // Objective-C objects are represented as pointers in the ABI.
+    auto PointerSize =
+        ASTCtx.getTargetInfo().getPointerWidth(QT.getAddressSpace());
+    llvm::Align PointerAlign =
+        llvm::Align(ASTCtx.getTargetInfo().getPointerAlign(LangAS::Default));
+    return Builder.getPointerType(
+        PointerSize, llvm::Align(PointerAlign.value() / 8),
+        ASTCtx.getTargetInfo().getTargetAddressSpace(QT.getAddressSpace()));
+  } else
+    llvm_unreachable("Unsupported type for ABI lowering!");
+
+  if (Result)
+    TypeCache[QT] = Result;
+  return Result;
+}
+
+/// Converts C/C++ builtin types to LLVM ABI types.
+/// This handles all fundamental scalar types including integers, floats,
+/// and special types like void and bool.
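// Caching example (illustrative): qualifiers are stripped before the cache
// lookup, so differently-qualified spellings share one entry:
//
//   const llvm::abi::Type *A = Mapper.convertType(Ctx.IntTy.withConst());
//   const llvm::abi::Type *B = Mapper.convertType(Ctx.IntTy);
//   assert(A == B);  // `const int` and `int` canonicalize to the same key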
+///
+/// \param BT The BuiltinType to convert
+/// \return Corresponding LLVM ABI integer, float, or void type
+const llvm::abi::Type *
+QualTypeMapper::convertBuiltinType(const BuiltinType *BT) {
+  QualType QT(BT, 0);
+
+  switch (BT->getKind()) {
+  case BuiltinType::Void:
+    return Builder.getVoidType();
+
+  case BuiltinType::NullPtr:
+    return createPointerTypeForPointee(QT);
+
+  case BuiltinType::Bool:
+    return Builder.getIntegerType(1, getTypeAlign(QT), false, false);
+
+  case BuiltinType::Char_S:
+  case BuiltinType::Char_U:
+  case BuiltinType::SChar:
+  case BuiltinType::UChar:
+  case BuiltinType::WChar_S:
+  case BuiltinType::WChar_U:
+  case BuiltinType::Char8:
+  case BuiltinType::Char16:
+  case BuiltinType::Char32:
+  case BuiltinType::Short:
+  case BuiltinType::UShort:
+  case BuiltinType::Int:
+  case BuiltinType::UInt:
+  case BuiltinType::Long:
+  case BuiltinType::ULong:
+  case BuiltinType::LongLong:
+  case BuiltinType::ULongLong:
+  case BuiltinType::Int128:
+  case BuiltinType::UInt128:
+    return Builder.getIntegerType(ASTCtx.getTypeSize(QT), getTypeAlign(QT),
+                                  BT->isSignedInteger(), false);
+
+  case BuiltinType::Half:
+  case BuiltinType::Float16:
+  case BuiltinType::BFloat16:
+  case BuiltinType::Float:
+  case BuiltinType::Double:
+  case BuiltinType::LongDouble:
+  case BuiltinType::Float128:
+    return Builder.getFloatType(ASTCtx.getFloatTypeSemantics(QT),
+                                getTypeAlign(QT));
+
+  case BuiltinType::OCLImage1dRO:
+  case BuiltinType::OCLImage1dWO:
+  case BuiltinType::OCLImage1dRW:
+  case BuiltinType::OCLImage1dArrayRO:
+  case BuiltinType::OCLImage1dArrayWO:
+  case BuiltinType::OCLImage1dArrayRW:
+  case BuiltinType::OCLImage1dBufferRO:
+  case BuiltinType::OCLImage1dBufferWO:
+  case BuiltinType::OCLImage1dBufferRW:
+  case BuiltinType::OCLImage2dRO:
+  case BuiltinType::OCLImage2dWO:
+  case BuiltinType::OCLImage2dRW:
+  case BuiltinType::OCLImage2dArrayRO:
+  case BuiltinType::OCLImage2dArrayWO:
+  case BuiltinType::OCLImage2dArrayRW:
+  case BuiltinType::OCLImage2dDepthRO:
+  case BuiltinType::OCLImage2dDepthWO:
+  case BuiltinType::OCLImage2dDepthRW:
+  case BuiltinType::OCLImage2dArrayDepthRO:
+  case BuiltinType::OCLImage2dArrayDepthWO:
+  case BuiltinType::OCLImage2dArrayDepthRW:
+  case BuiltinType::OCLImage2dMSAARO:
+  case BuiltinType::OCLImage2dMSAAWO:
+  case BuiltinType::OCLImage2dMSAARW:
+  case BuiltinType::OCLImage2dArrayMSAARO:
+  case BuiltinType::OCLImage2dArrayMSAAWO:
+  case BuiltinType::OCLImage2dArrayMSAARW:
+  case BuiltinType::OCLImage2dMSAADepthRO:
+  case BuiltinType::OCLImage2dMSAADepthWO:
+  case BuiltinType::OCLImage2dMSAADepthRW:
+  case BuiltinType::OCLImage2dArrayMSAADepthRO:
+  case BuiltinType::OCLImage2dArrayMSAADepthWO:
+  case BuiltinType::OCLImage2dArrayMSAADepthRW:
+  case BuiltinType::OCLImage3dRO:
+  case BuiltinType::OCLImage3dWO:
+  case BuiltinType::OCLImage3dRW:
+    return createPointerTypeForPointee(QT);
+
+  case BuiltinType::OCLSampler:
+  case BuiltinType::OCLEvent:
+  case BuiltinType::OCLQueue:
+    return createPointerTypeForPointee(QT);
+
+  default:
+    // Unhandled builtin types are treated as unsigned integers.
+    return Builder.getIntegerType(ASTCtx.getTypeSize(QualType(BT, 0)),
+                                  getTypeAlign(QualType(BT, 0)), false);
+  }
+}
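// Example results of the mapping above on a typical x86-64 Linux target
// (illustrative): `bool` becomes a 1-bit integer with byte alignment, `int` a
// signed 32-bit integer with 4-byte alignment, `long double` a float type
// carrying x87DoubleExtended semantics and 16-byte alignment, and the OpenCL
// image and sampler kinds lower to pointers.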
+/// Converts array types to LLVM ABI array representations.
+/// Handles the different array kinds: constant arrays, incomplete arrays,
+/// and variable-length arrays.
+///
+/// \param AT The ArrayType to convert
+/// \return LLVM ABI ArrayType or PointerType
+const llvm::abi::Type *
+QualTypeMapper::convertArrayType(const clang::ArrayType *AT) {
+  const llvm::abi::Type *ElementType = convertType(AT->getElementType());
+  uint64_t Size = ASTCtx.getTypeSize(AT);
+
+  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
+    auto NumElements = CAT->getZExtSize();
+    return Builder.getArrayType(ElementType, NumElements, Size);
+  }
+  if (isa<IncompleteArrayType>(AT))
+    return Builder.getArrayType(ElementType, 0, 0);
+  if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
+    return createPointerTypeForPointee(VAT->getPointeeType());
+  // Fallback for other array types.
+  return Builder.getArrayType(ElementType, 1, Size);
+}
+
+const llvm::abi::Type *
+QualTypeMapper::convertVectorType(const VectorType *VT) {
+  const llvm::abi::Type *ElementType = convertType(VT->getElementType());
+  QualType VectorQualType(VT, 0);
+
+  // Handle element size adjustments for sub-byte types.
+  if (auto *IT = llvm::dyn_cast<llvm::abi::IntegerType>(ElementType)) {
+    unsigned BW = IT->getSizeInBits().getFixedValue();
+    if (BW != 1 && (BW & 7)) {
+      BW = llvm::bit_ceil(BW);
+      BW = std::clamp(BW, 8u, 64u);
+      ElementType =
+          Builder.getIntegerType(BW, llvm::Align(BW / 8), IT->isSigned());
+    } else if (BW < 8 && BW != 1) {
+      ElementType = Builder.getIntegerType(8, llvm::Align(1), IT->isSigned());
+    }
+  }
+
+  unsigned NElems = VT->getNumElements();
+  uint64_t LogicalSizeInBits =
+      NElems * ElementType->getSizeInBits().getFixedValue();
+
+  // Only round up for small vectors (<= 64 bits).
+  if (LogicalSizeInBits <= 64) {
+    uint64_t ABISizeInBits = ASTCtx.getTypeSize(VectorQualType);
+    if (ABISizeInBits > LogicalSizeInBits) {
+      uint64_t ElementSizeInBits = ElementType->getSizeInBits().getFixedValue();
+      NElems = ABISizeInBits / ElementSizeInBits;
+    }
+  }
+  // For larger vectors, keep the exact element count.
+
+  llvm::ElementCount NumElements = llvm::ElementCount::getFixed(NElems);
+  llvm::Align VectorAlign = getTypeAlign(VectorQualType);
+
+  return Builder.getVectorType(ElementType, NumElements, VectorAlign);
+}
+
+/// Converts complex types to LLVM ABI complex representations.
+/// Complex types consist of two components of the element type
+/// (real and imaginary parts).
+///
+/// \param CT The ComplexType to convert
+/// \return LLVM ABI ComplexType with element type and alignment
+const llvm::abi::Type *
+QualTypeMapper::convertComplexType(const ComplexType *CT) {
+  const llvm::abi::Type *ElementType = convertType(CT->getElementType());
+  llvm::Align ComplexAlign = getTypeAlign(QualType(CT, 0));
+
+  return Builder.getComplexType(ElementType, ComplexAlign);
+}
+
+/// Converts member pointer types to LLVM ABI representations.
+/// Member pointers have different layouts depending on whether they
+/// point to functions or data members.
+///
+/// \param MPT The MemberPointerType to convert
+/// \return LLVM ABI MemberPointerType
+const llvm::abi::Type *
+QualTypeMapper::convertMemberPointerType(const clang::MemberPointerType *MPT) {
+  QualType QT(MPT, 0);
+  uint64_t Size = ASTCtx.getTypeSize(QT);
+  llvm::Align Align = getTypeAlign(QT);
+
+  bool IsFunctionPointer = MPT->isMemberFunctionPointerType();
+
+  return Builder.getMemberPointerType(IsFunctionPointer, Size, Align);
+}
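// Worked example for the vector path above (illustrative): a 4-element vector
// of 3-bit integers has its element width rounded up via bit_ceil(3) = 4 and
// then clamped to the 8..64 range, yielding 4 x i8 = 32 logical bits. Since
// 32 <= 64, the element count is then re-derived from the AST's ABI size, so
// a 32-bit ABI size keeps the count at 4.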
+/// Converts record types (struct/class/union) to LLVM ABI representations.
+/// This is the main dispatch method that handles the different record kinds
+/// and delegates to specialized converters.
+///
+/// \param RT The RecordType to convert
+/// \return LLVM ABI RecordType
+const llvm::abi::Type *
+QualTypeMapper::convertRecordType(const RecordType *RT) {
+  const RecordDecl *RD = RT->getOriginalDecl()->getDefinition();
+  bool canPassInRegs = false;
+  bool hasFlexibleArrMember = false;
+  if (RD) {
+    canPassInRegs = RD->canPassInRegisters();
+    hasFlexibleArrMember = RD->hasFlexibleArrayMember();
+  }
+  if (!RD) {
+    SmallVector<llvm::abi::FieldInfo> Fields;
+    return Builder.getRecordType(
+        Fields, llvm::TypeSize::getFixed(0), llvm::Align(1),
+        llvm::abi::StructPacking::Default, {}, {}, false, false, false, false,
+        hasFlexibleArrMember, false, canPassInRegs);
+  }
+
+  if (RD->isUnion()) {
+    const RecordDecl *UD = RT->getOriginalDecl();
+    if (UD->hasAttr<TransparentUnionAttr>())
+      return convertUnionType(RD, true);
+    return convertUnionType(RD);
+  }
+
+  // Handle C++ classes with base classes.
+  const auto *CXXRd = dyn_cast<CXXRecordDecl>(RD);
+  if (CXXRd && (CXXRd->getNumBases() > 0 || CXXRd->getNumVBases() > 0))
+    return convertCXXRecordType(CXXRd, canPassInRegs);
+  return convertStructType(RD);
+}
+
+/// Converts C++ classes with inheritance to LLVM ABI struct representations.
+/// This method handles the complex layout of C++ objects including:
+/// - Virtual table pointers for polymorphic classes
+/// - Base class subobjects (both direct and virtual bases)
+/// - Member field layout with proper offsets
+///
+/// \param RD The C++ record declaration
+/// \return LLVM ABI RecordType representing the complete object layout
+const llvm::abi::RecordType *
+QualTypeMapper::convertCXXRecordType(const CXXRecordDecl *RD,
+                                     bool canPassInRegs) {
+  const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(RD);
+  SmallVector<llvm::abi::FieldInfo> Fields;
+  SmallVector<llvm::abi::FieldInfo> BaseClasses;
+  SmallVector<llvm::abi::FieldInfo> VirtualBaseClasses;
+
+  // Add the vtable pointer for polymorphic classes.
+  if (RD->isPolymorphic()) {
+    const llvm::abi::Type *VtablePointer =
+        createPointerTypeForPointee(ASTCtx.VoidPtrTy);
+    Fields.emplace_back(VtablePointer, 0);
+  }
+
+  for (const auto &Base : RD->bases()) {
+    if (Base.isVirtual())
+      continue;
+
+    const RecordType *BaseRT = Base.getType()->castAs<RecordType>();
+    const llvm::abi::Type *BaseType = convertType(Base.getType());
+    uint64_t BaseOffset =
+        Layout.getBaseClassOffset(BaseRT->getAsCXXRecordDecl()).getQuantity() *
+        8;
+
+    BaseClasses.emplace_back(BaseType, BaseOffset);
+  }
+
+  for (const auto &VBase : RD->vbases()) {
+    const RecordType *VBaseRT = VBase.getType()->getAs<RecordType>();
+    if (!VBaseRT)
+      continue;
+
+    const llvm::abi::Type *VBaseType = convertType(VBase.getType());
+    uint64_t VBaseOffset =
+        Layout.getVBaseClassOffset(VBaseRT->getAsCXXRecordDecl())
+            .getQuantity() *
+        8;
+
+    VirtualBaseClasses.emplace_back(VBaseType, VBaseOffset);
+  }
+
+  computeFieldInfo(RD, Fields, Layout);
+
+  llvm::sort(Fields,
+             [](const llvm::abi::FieldInfo &A, const llvm::abi::FieldInfo &B) {
+               return A.OffsetInBits < B.OffsetInBits;
+             });
+
+  llvm::TypeSize Size =
+      llvm::TypeSize::getFixed(Layout.getSize().getQuantity() * 8);
+  llvm::Align Alignment = llvm::Align(Layout.getAlignment().getQuantity());
+
+  bool HasNonTrivialCopy = !RD->hasSimpleCopyConstructor();
+  bool HasNonTrivialDtor = !RD->hasSimpleDestructor();
+  bool HasFlexibleArrayMember = RD->hasFlexibleArrayMember();
+  bool HasUnalignedFields = false;
+
+  unsigned FieldIndex = 0;
+  for (const auto *FD : RD->fields()) {
+    uint64_t FieldOffset = Layout.getFieldOffset(FieldIndex);
+    uint64_t ExpectedAlignment = ASTCtx.getTypeAlign(FD->getType());
+    if (FieldOffset % ExpectedAlignment != 0) {
+      HasUnalignedFields = true;
+      break;
+    }
+    ++FieldIndex;
+  }
+
+  return Builder.getRecordType(
+      Fields, Size, Alignment, llvm::abi::StructPacking::Default, BaseClasses,
+      VirtualBaseClasses, true, RD->isPolymorphic(), HasNonTrivialCopy,
+      HasNonTrivialDtor, HasFlexibleArrayMember, HasUnalignedFields,
+      canPassInRegs);
+}
+
+/// Converts reference types to pointer representations in the ABI.
+/// Both lvalue references (T&) and rvalue references (T&&) are represented
+/// as pointers at the ABI level.
+///
+/// \param RT The ReferenceType to convert
+/// \return LLVM ABI PointerType
+const llvm::abi::Type *
+QualTypeMapper::convertReferenceType(const ReferenceType *RT) {
+  return createPointerTypeForPointee(RT->getPointeeType());
+}
+
+/// Converts pointer types to LLVM ABI pointer representations,
+/// taking the address space of the pointed-to type into account.
+///
+/// \param PT The PointerType to convert
+/// \return LLVM ABI PointerType with appropriate size and alignment
+const llvm::abi::Type *
+QualTypeMapper::convertPointerType(const clang::PointerType *PT) {
+  return createPointerTypeForPointee(PT->getPointeeType());
+}
+
+/// Converts enumeration types to their underlying integer representations.
+/// Falls back to a safe default when the enum's underlying type information
+/// is incomplete or invalid.
+///
+/// \param ET The EnumType to convert
+/// \return LLVM ABI IntegerType representing the enum's underlying type
+const llvm::abi::Type *
+QualTypeMapper::convertEnumType(const clang::EnumType *ET) {
+  const EnumDecl *ED = ET->getOriginalDecl();
+  QualType UnderlyingType = ED->getIntegerType();
+
+  if (UnderlyingType.isNull())
+    UnderlyingType = ASTCtx.IntTy;
+
+  return convertType(UnderlyingType);
+}
+
+/// Converts plain C structs and C++ classes without inheritance.
+/// This handles the simpler case where only member fields need to be laid
+/// out, without considering base classes or virtual functions.
+///
+/// \param RD The RecordDecl to convert
+/// \return LLVM ABI RecordType
+const llvm::abi::RecordType *
+QualTypeMapper::convertStructType(const clang::RecordDecl *RD) {
+  const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(RD);
+
+  bool IsCXXRecord = isa<CXXRecordDecl>(RD);
+  SmallVector<llvm::abi::FieldInfo> Fields;
+  computeFieldInfo(RD, Fields, Layout);
+
+  llvm::TypeSize Size =
+      llvm::TypeSize::getFixed(Layout.getSize().getQuantity() * 8);
+  llvm::Align Alignment = llvm::Align(Layout.getAlignment().getQuantity());
+
+  return Builder.getRecordType(
+      Fields, Size, Alignment, llvm::abi::StructPacking::Default, {}, {},
+      IsCXXRecord, false, false, false, RD->hasFlexibleArrayMember(), false,
+      RD->canPassInRegisters());
+}
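// Layout example (illustrative), on a typical 64-bit Itanium C++ ABI target:
//
//   struct B { virtual ~B(); int x; };  // vptr at bit 0, x at bit 64
//   struct D : B { int y; };
//
// D has a base, so convertRecordType dispatches to convertCXXRecordType: the
// polymorphic check records a vtable-pointer field at bit offset 0, B is
// recorded as a base-class subobject at offset 0, and y lands at bit offset
// 96 (reusing B's tail padding). A plain struct with no bases instead takes
// the simpler convertStructType path above.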
+/// Converts C union types, where all fields occupy the same memory location.
+/// The union's size is determined by its largest member, and all fields
+/// start at offset 0.
+///
+/// \param RD The RecordDecl representing the union
+/// \return LLVM ABI UnionType
+const llvm::abi::RecordType *
+QualTypeMapper::convertUnionType(const clang::RecordDecl *RD,
+                                 bool isTransparent) {
+  const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(RD);
+
+  SmallVector<llvm::abi::FieldInfo> AllFields;
+  computeFieldInfo(RD, AllFields, Layout);
+
+  llvm::TypeSize Size =
+      llvm::TypeSize::getFixed(Layout.getSize().getQuantity() * 8);
+  llvm::Align Alignment = llvm::Align(Layout.getAlignment().getQuantity());
+
+  return Builder.getUnionType(AllFields, Size, Alignment,
+                              llvm::abi::StructPacking::Default, isTransparent,
+                              RD->canPassInRegisters(),
+                              isa<CXXRecordDecl>(RD));
+}
+
+llvm::Align QualTypeMapper::getPreferredTypeAlign(QualType QT) const {
+  return llvm::Align(ASTCtx.getPreferredTypeAlignInChars(QT).getQuantity());
+}
+
+llvm::Align QualTypeMapper::getTypeAlign(QualType QT) const {
+  return llvm::Align(ASTCtx.getTypeAlignInChars(QT).getQuantity());
+}
+
+const llvm::abi::Type *
+QualTypeMapper::createPointerTypeForPointee(QualType PointeeType) {
+  auto AddrSpace = PointeeType.getAddressSpace();
+  auto PointerSize = ASTCtx.getTargetInfo().getPointerWidth(AddrSpace);
+  llvm::Align Alignment =
+      llvm::Align(ASTCtx.getTargetInfo().getPointerAlign(AddrSpace));
+  return Builder.getPointerType(
+      PointerSize, llvm::Align(Alignment.value() / 8),
+      ASTCtx.getTargetInfo().getTargetAddressSpace(AddrSpace));
+}
+
+/// Processes the fields of a record (struct/class/union) and populates
+/// the Fields vector with FieldInfo objects containing type, offset,
+/// and bitfield information.
+///
+/// \param RD The RecordDecl whose fields to process
+/// \param Fields Output vector to populate with field information
+/// \param Layout The AST record layout containing field offset information
+void QualTypeMapper::computeFieldInfo(
+    const RecordDecl *RD, SmallVectorImpl<llvm::abi::FieldInfo> &Fields,
+    const ASTRecordLayout &Layout) {
+  unsigned FieldIndex = 0;
+
+  for (const auto *FD : RD->fields()) {
+    const llvm::abi::Type *FieldType = convertType(FD->getType());
+    uint64_t OffsetInBits = Layout.getFieldOffset(FieldIndex);
+
+    bool IsBitField = FD->isBitField();
+    uint64_t BitFieldWidth = 0;
+    bool IsUnnamedBitField = false;
+
+    if (IsBitField) {
+      BitFieldWidth = FD->getBitWidthValue();
+      IsUnnamedBitField = FD->isUnnamedBitField();
+    }
+
+    Fields.emplace_back(FieldType, OffsetInBits, IsBitField, BitFieldWidth,
+                        IsUnnamedBitField);
+    ++FieldIndex;
+  }
+}
+
+} // namespace CodeGen
+} // namespace clang
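// Field-info example (illustrative): for
//
//   struct Flags { unsigned a : 3; unsigned : 0; unsigned b; };
//
// computeFieldInfo emits one FieldInfo per declared field, mirroring
// ASTRecordLayout's bit offsets: `a` as a 3-bit bitfield at offset 0, the
// zero-width unnamed bitfield (IsUnnamedBitField = true), and `b` as a plain
// 32-bit field at bit offset 32.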
diff --git a/llvm/include/llvm/ABI/ABIFunctionInfo.h b/llvm/include/llvm/ABI/ABIFunctionInfo.h
new file mode 100644
index 0000000000000..a25ca5d86cda6
--- /dev/null
+++ b/llvm/include/llvm/ABI/ABIFunctionInfo.h
@@ -0,0 +1,414 @@
+//===----- ABIFunctionInfo.h - ABI Function Information --------- C++ -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines ABIFunctionInfo and associated types used in representing the
+// ABI-coerced types for function arguments and return values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ABI_ABIFUNCTIONINFO_H
+#define LLVM_ABI_ABIFUNCTIONINFO_H
+
+#include "llvm/ABI/Types.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/TrailingObjects.h"
+
+namespace llvm {
+namespace abi {
+
+/// ABIArgInfo - Helper class to encapsulate information about how a
+/// specific type should be passed to or returned from a function.
+class ABIArgInfo {
+public:
+  enum Kind {
+    Direct,
+    Extend,
+    Indirect,
+    IndirectAliased,
+    Ignore,
+    Expand,
+    CoerceAndExpand,
+    InAlloca
+  };
+
+private:
+  const Type *CoercionType;
+
+  struct DirectAttrInfo {
+    unsigned Offset;
+    unsigned Align;
+  };
+
+  struct IndirectAttrInfo {
+    unsigned Align;
+    unsigned AddrSpace;
+  };
+
+  union {
+    DirectAttrInfo DirectAttr;
+    IndirectAttrInfo IndirectAttr;
+    unsigned AllocaFieldIndex;
+  };
+
+  union {
+    const Type *PaddingType;
+    const Type *UnpaddedCoerceAndExpandType;
+  };
+
+  Kind TheKind;
+  bool InReg : 1;
+  bool PaddingInReg : 1;
+  bool SignExt : 1;
+  bool ZeroExt : 1;
+  bool IndirectByVal : 1;
+  bool IndirectRealign : 1;
+  bool SRetAfterThis : 1;
+  bool CanBeFlattened : 1;
+  bool InAllocaSRet : 1;
+  bool InAllocaIndirect : 1;
+
+  ABIArgInfo(Kind K = Direct)
+      : CoercionType(nullptr), PaddingType(nullptr), TheKind(K), InReg(false),
+        PaddingInReg(false), SignExt(false), ZeroExt(false),
+        IndirectByVal(false), IndirectRealign(false), SRetAfterThis(false),
+        CanBeFlattened(false), InAllocaSRet(false), InAllocaIndirect(false) {}
+
+public:
+  static ABIArgInfo getDirect(const Type *T = nullptr, unsigned Offset = 0,
+                              const Type *Padding = nullptr,
+                              bool CanBeFlattened = true, unsigned Align = 0) {
+    ABIArgInfo AI(Direct);
+    AI.CoercionType = T;
+    AI.PaddingType = Padding;
+    AI.DirectAttr.Offset = Offset;
+    AI.DirectAttr.Align = Align;
+    AI.CanBeFlattened = CanBeFlattened;
+    return AI;
+  }
+
+  static ABIArgInfo getIndirectAliased(unsigned Align, unsigned AddrSpace = 0,
+                                       bool Realign = false,
+                                       const Type *Padding = nullptr) {
+    ABIArgInfo AI(IndirectAliased);
+    AI.IndirectAttr.Align = Align;
+    AI.IndirectAttr.AddrSpace = AddrSpace;
+    AI.IndirectRealign = Realign;
+    AI.PaddingType = Padding;
+    return AI;
+  }
+
+  static ABIArgInfo getDirectInReg(const Type *T = nullptr) {
+    ABIArgInfo AI = getDirect(T);
+    AI.InReg = true;
+    return AI;
+  }
+
+  static ABIArgInfo getExtend(const Type *T) {
+    assert(T && "Type cannot be null");
+    assert(T->isInteger() && "Unexpected type - only integers can be extended");
+
+    ABIArgInfo AI(Extend);
+    AI.CoercionType = T;
+    AI.DirectAttr.Offset = 0;
+    AI.DirectAttr.Align = 0;
+    AI.PaddingType = nullptr;
+
+    const IntegerType *IntTy = cast<IntegerType>(T);
+    if (IntTy->isSigned()) {
+      AI.setSignExt();
+    } else {
+      AI.setZeroExt();
+    }
+
+    return AI;
+  }
+
+  ABIArgInfo &setSignExt(bool SignExtend = true) {
+    this->SignExt = SignExtend;
+    if (SignExtend)
+      this->ZeroExt = false;
+    return *this;
+  }
+
+  ABIArgInfo &setZeroExt(bool ZeroExtend = true) {
+    this->ZeroExt = ZeroExtend;
+    if (ZeroExtend)
+      this->SignExt = false;
+    return *this;
+  }
+
+  static ABIArgInfo getIndirect(unsigned Align = 0, bool ByVal = true,
+                                unsigned AddrSpace = 0, bool Realign = false,
+                                const Type *Padding = nullptr) {
+    ABIArgInfo AI(Indirect);
+    AI.IndirectAttr.Align = Align;
+    AI.IndirectAttr.AddrSpace = AddrSpace;
+    AI.IndirectByVal = ByVal;
+    AI.IndirectRealign = Realign;
+    AI.SRetAfterThis = false;
+    AI.PaddingType = Padding;
+    return AI;
+  }
+
+  static ABIArgInfo getIndirectInReg(unsigned Align = 0, bool ByVal = true) {
+    ABIArgInfo AI = getIndirect(Align, ByVal);
+    AI.InReg = true;
+    return AI;
+  }
+
+  static ABIArgInfo getIgnore() { return ABIArgInfo(Ignore); }
+  static ABIArgInfo getExpand() { return ABIArgInfo(Expand); }
+
+  static ABIArgInfo getCoerceAndExpand(const Type *CoercionType) {
+    ABIArgInfo AI(CoerceAndExpand);
+    AI.CoercionType = CoercionType;
+    return AI;
+  }
+
+  Kind getKind() const { return TheKind; }
+  bool isDirect() const { return TheKind == Direct; }
+  bool isIndirect() const { return TheKind == Indirect; }
+  bool isIgnore() const { return TheKind == Ignore; }
+  bool isExtend() const { return TheKind == Extend; }
+  bool isExpand() const { return TheKind == Expand; }
+  bool isCoerceAndExpand() const { return TheKind == CoerceAndExpand; }
+  bool isIndirectAliased() const { return TheKind == IndirectAliased; }
+  bool isInAlloca() const { return TheKind == InAlloca; }
+  bool isInReg() const { return InReg; }
+  bool isSignExt() const { return SignExt; }
+  bool hasPaddingInReg() const { return PaddingInReg; }
+
+  const Type *getPaddingType() const {
+    return canHavePaddingType() ? PaddingType : nullptr;
+  }
+
+  bool canHavePaddingType() const {
+    return isDirect() || isExtend() || isIndirect() || isIndirectAliased() ||
+           isExpand();
+  }
+
+  unsigned getDirectOffset() const {
+    assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+    return DirectAttr.Offset;
+  }
+
+  unsigned getIndirectAlign() const {
+    assert((isIndirect() || isIndirectAliased()) && "Invalid Kind!");
+    return IndirectAttr.Align;
+  }
+
+  unsigned getIndirectAddrSpace() const {
+    assert((isIndirect() || isIndirectAliased()) && "Invalid Kind!");
+    return IndirectAttr.AddrSpace;
+  }
+
+  bool getIndirectByVal() const {
+    assert(isIndirect() && "Invalid Kind!");
+    return IndirectByVal;
+  }
+
+  bool getIndirectRealign() const {
+    assert((isIndirect() || isIndirectAliased()) && "Invalid Kind!");
+    return IndirectRealign;
+  }
+
+  bool isSRetAfterThis() const {
+    assert(isIndirect() && "Invalid Kind!");
+    return SRetAfterThis;
+  }
+
+  unsigned getInAllocaFieldIndex() const {
+    assert(isInAlloca() && "Invalid kind!");
+    return AllocaFieldIndex;
+  }
+
+  bool getInAllocaIndirect() const {
+    assert(isInAlloca() && "Invalid kind!");
+    return InAllocaIndirect;
+  }
+
+  bool getInAllocaSRet() const {
+    assert(isInAlloca() && "Invalid kind!");
+    return InAllocaSRet;
+  }
+
+  const Type *getUnpaddedCoerceAndExpandType() const {
+    assert(isCoerceAndExpand());
+    return UnpaddedCoerceAndExpandType;
+  }
+
+  bool isZeroExt() const {
+    assert(isExtend() && "Invalid Kind!");
+    return ZeroExt;
+  }
+
+  bool isNoExt() const {
+    assert(isExtend() && "Invalid Kind!");
+    return !SignExt && !ZeroExt;
+  }
+
+  unsigned getDirectAlign() const {
+    assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+    return DirectAttr.Align;
+  }
+
+  bool getCanBeFlattened() const {
+    assert(isDirect() && "Invalid kind!");
+    return CanBeFlattened;
+  }
+
+  const Type *getCoerceToType() const {
+    assert((isDirect() || isExtend() || isCoerceAndExpand()) &&
+           "Invalid Kind!");
+    return CoercionType;
+  }
+
+  ABIArgInfo &setInReg(bool InReg = true) {
+    this->InReg = InReg;
+    return *this;
+  }
+
+  ABIArgInfo &setPaddingInReg(bool HasPadding = true) {
+    this->PaddingInReg = HasPadding;
+    return *this;
+  }
+};
+
+/// Function-level ABI attributes that affect argument and return passing.
+struct ABICallAttributes {
+  CallingConv::ID CC = CallingConv::C;
+  CallingConv::ID EffectiveCC = CallingConv::C;
+
+  bool HasSRet = false;
+  bool IsInstanceMethod = false;
+  bool IsChainCall = false;
+  bool IsDelegateCall = false;
+
+  // Register usage controls
+  bool HasRegParm = false;
+  unsigned RegParm = 0;
+  bool NoCallerSavedRegs = false;
+
+  // Security extensions
+  bool NoCfCheck = false;
+  bool CmseNSCall = false;
+
+  // Memory management
+  bool ReturnsRetained = false;
+  unsigned MaxVectorWidth = 0;
+
+  ABICallAttributes() = default;
+  ABICallAttributes(CallingConv::ID CC) : CC(CC), EffectiveCC(CC) {}
+};
+
+/// Information about required vs. optional arguments for variadic functions.
+struct RequiredArgs {
+private:
+  unsigned NumRequired;
+  static constexpr unsigned All = ~0U;
+
+public:
+  RequiredArgs() : NumRequired(All) {}
+  explicit RequiredArgs(unsigned N) : NumRequired(N) {}
+
+  static RequiredArgs forPrototypedFunction(unsigned NumArgs) {
+    return RequiredArgs(NumArgs);
+  }
+
+  static RequiredArgs forVariadicFunction(unsigned NumRequired) {
+    return RequiredArgs(NumRequired);
+  }
+
+  bool allowsOptionalArgs() const { return NumRequired != All; }
+  bool isVariadic() const { return allowsOptionalArgs(); }
+
+  unsigned getNumRequiredArgs() const {
+    assert(allowsOptionalArgs());
+    return NumRequired;
+  }
+
+  bool operator==(const RequiredArgs &Other) const {
+    return NumRequired == Other.NumRequired;
+  }
+};
+
+/// Argument information for ABIFunctionInfo.
+struct ABIFunctionInfoArgInfo {
+  const Type *ABIType;
+  ABIArgInfo ArgInfo;
+
+  ABIFunctionInfoArgInfo()
+      : ABIType(nullptr), ArgInfo(ABIArgInfo::getDirect()) {}
+  ABIFunctionInfoArgInfo(Type *T)
+      : ABIType(T), ArgInfo(ABIArgInfo::getDirect()) {}
+  ABIFunctionInfoArgInfo(Type *T, ABIArgInfo A) : ABIType(T), ArgInfo(A) {}
+};
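// Example (illustrative): for `int printf(const char *, ...)` invoked with
// three arguments, RequiredArgs::forVariadicFunction(1) marks one required
// argument; allowsOptionalArgs() is true and getNumRequiredArgs() returns 1,
// so the two trailing arguments are classified as variadic extras.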
+class ABIFunctionInfo final
+    : private TrailingObjects<ABIFunctionInfo, ABIFunctionInfoArgInfo> {
+  typedef ABIFunctionInfoArgInfo ArgInfo;
+
+private:
+  const Type *ReturnType;
+  ABIArgInfo ReturnInfo;
+  unsigned NumArgs;
+  ABICallAttributes CallAttrs;
+  RequiredArgs Required;
+
+  ABIFunctionInfo(const Type *RetTy, unsigned NumArguments)
+      : ReturnType(RetTy), ReturnInfo(ABIArgInfo::getDirect()),
+        NumArgs(NumArguments) {}
+
+  friend TrailingObjects;
+
+public:
+  typedef const ArgInfo *const_arg_iterator;
+  typedef ArgInfo *arg_iterator;
+
+  void operator delete(void *p) { ::operator delete(p); }
+
+  const_arg_iterator arg_begin() const { return getTrailingObjects(); }
+  const_arg_iterator arg_end() const { return getTrailingObjects() + NumArgs; }
+  arg_iterator arg_begin() { return getTrailingObjects(); }
+  arg_iterator arg_end() { return getTrailingObjects() + NumArgs; }
+
+  unsigned arg_size() const { return NumArgs; }
+
+  static ABIFunctionInfo *
+  create(CallingConv::ID CC, const Type *ReturnType,
+         ArrayRef<const Type *> ArgTypes,
+         const ABICallAttributes &CallAttrs = ABICallAttributes(),
+         RequiredArgs Required = RequiredArgs());
+
+  const Type *getReturnType() const { return ReturnType; }
+  ABIArgInfo &getReturnInfo() { return ReturnInfo; }
+  const ABIArgInfo &getReturnInfo() const { return ReturnInfo; }
+
+  CallingConv::ID getCallingConvention() const { return CallAttrs.CC; }
+  const ABICallAttributes &getCallAttributes() const { return CallAttrs; }
+  RequiredArgs getRequiredArgs() const { return Required; }
+  bool isVariadic() const { return Required.isVariadic(); }
+
+  unsigned getNumRequiredArgs() const {
+    return isVariadic() ? Required.getNumRequiredArgs() : arg_size();
+  }
+
+  ArrayRef<ArgInfo> arguments() const {
+    return {getTrailingObjects(), NumArgs};
+  }
+
+  MutableArrayRef<ArgInfo> arguments() {
+    return {getTrailingObjects(), NumArgs};
+  }
+
+  ArgInfo &getArgInfo(unsigned Index) {
+    assert(Index < NumArgs && "Invalid argument index");
+    return arguments()[Index];
+  }
+
+  const ArgInfo &getArgInfo(unsigned Index) const {
+    assert(Index < NumArgs && "Invalid argument index");
+    return arguments()[Index];
+  }
+
+  unsigned getNumArgs() const { return NumArgs; }
+};
+
+} // namespace abi
+} // namespace llvm
+
+#endif // LLVM_ABI_ABIFUNCTIONINFO_H
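// Usage sketch (illustrative, not part of the patch), assuming `RetTy` and
// `ArgTy` were produced by a TypeBuilder and `Info` is a target ABIInfo:
//
//   llvm::abi::ABIFunctionInfo *FI = llvm::abi::ABIFunctionInfo::create(
//       llvm::CallingConv::C, RetTy, {ArgTy});
//   Info.computeInfo(*FI);  // fills in return and per-argument ABIArgInfo
//   const llvm::abi::ABIArgInfo &AI = FI->getArgInfo(0).ArgInfo;
//   if (AI.isDirect()) {
//     // pass using AI.getCoerceToType(), mapped back to IR
//   }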
diff --git a/llvm/include/llvm/ABI/ABIInfo.h b/llvm/include/llvm/ABI/ABIInfo.h
new file mode 100644
index 0000000000000..5f8f067709ad0
--- /dev/null
+++ b/llvm/include/llvm/ABI/ABIInfo.h
@@ -0,0 +1,91 @@
+//===----- ABIInfo.h - ABI information access & encapsulation --- C++ -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// ABI information access & encapsulation
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ABI_ABIINFO_H
+#define LLVM_ABI_ABIINFO_H
+
+#include "llvm/ABI/ABIFunctionInfo.h"
+#include "llvm/ABI/Types.h"
+#include <climits>
+#include <cstdint>
+
+namespace llvm {
+namespace abi {
+
+enum RecordArgABI {
+  /// Pass it using the normal C aggregate rules for the ABI, potentially
+  /// introducing extra copies and passing some or all of it in registers.
+  RAA_Default = 0,
+
+  /// Pass it on the stack using its defined layout. The argument must be
+  /// evaluated directly into the correct stack position in the arguments
+  /// area, and the call machinery must not move it or introduce extra copies.
+  RAA_DirectInMemory,
+
+  /// Pass it as a pointer to temporary memory.
+  RAA_Indirect
+};
+
+struct ABICompatInfo {
+  unsigned Version = UINT_MAX;
+
+  struct ABIFlags {
+    bool PassInt128VectorsInMem : 1;
+    bool ReturnCXXRecordGreaterThan128InMem : 1;
+    bool ClassifyIntegerMMXAsSSE : 1;
+    bool HonorsRevision98 : 1;
+    bool Clang11Compat : 1;
+
+    ABIFlags()
+        : PassInt128VectorsInMem(true),
+          ReturnCXXRecordGreaterThan128InMem(true),
+          ClassifyIntegerMMXAsSSE(true), HonorsRevision98(true),
+          Clang11Compat(true) {}
+  } Flags;
+
+  ABICompatInfo() : Version(UINT_MAX) {}
+  ABICompatInfo(unsigned Ver) : Version(Ver) {}
+};
+
+/// Abstract base class for target-specific ABI information.
+class ABIInfo {
+private:
+  ABICompatInfo CompatInfo;
+
+public:
+  ABIInfo() : CompatInfo() {}
+  explicit ABIInfo(const ABICompatInfo &Info) : CompatInfo(Info) {}
+
+  virtual ~ABIInfo() = default;
+
+  RecordArgABI getRecordArgABI(const RecordType *RT) const;
+  RecordArgABI getRecordArgABI(const Type *Ty) const;
+  RecordArgABI getRecordArgABI(const RecordType *RT, bool IsCxxRecord) const;
+
+  bool isPromotableInteger(const IntegerType *IT) const;
+
+  virtual void computeInfo(ABIFunctionInfo &FI) const = 0;
+  virtual bool isPassByRef(const Type *Ty) const { return false; }
+
+  const ABICompatInfo &getABICompatInfo() const { return CompatInfo; }
+
+  ABIArgInfo getNaturalAlignIndirect(const Type *Ty, bool ByVal = true) const;
+  bool isAggregateTypeForABI(const Type *Ty) const;
+  bool isZeroSizedType(const Type *Ty) const;
+  bool isEmptyRecord(const RecordType *RT) const;
+  bool isEmptyField(const FieldInfo &FI) const;
+
+  void setABICompatInfo(const ABICompatInfo &Info) { CompatInfo = Info; }
+};
+
+} // namespace abi
+} // namespace llvm
+
+#endif // LLVM_ABI_ABIINFO_H
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_ABITYPEMAPPER_H +#define LLVM_CODEGEN_ABITYPEMAPPER_H + +#include "llvm/ABI/Types.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/TypeSize.h" + +namespace llvm::abi { + +class ABITypeMapper { +public: + explicit ABITypeMapper(LLVMContext &Ctx, const DataLayout &DL) + : Context(Ctx), DL(DL) {} + + llvm::Type *convertType(const abi::Type *ABIType); + + void clearCache() { TypeCache.clear(); } + +private: + LLVMContext &Context; + const DataLayout &DL; + + llvm::DenseMap<const abi::Type *, llvm::Type *> TypeCache; + + llvm::Type *convertArrayType(const abi::ArrayType *AT); + + llvm::Type *convertMatrixType(const abi::ArrayType *MT); + + llvm::Type *convertVectorType(const abi::VectorType *VT); + + llvm::Type *convertRecordType(const abi::RecordType *RT); + + llvm::Type *getFloatTypeForSemantics(const fltSemantics &Semantics); + + llvm::StructType *createStructFromFields(ArrayRef<FieldInfo> Fields, + TypeSize Size, Align Alignment, + bool IsUnion = false, + bool IsCoercedStr = false); + llvm::Type *createPaddingType(uint64_t PaddingBits); + llvm::Type *convertComplexType(const abi::ComplexType *CT); + + llvm::Type *convertMemberPointerType(const abi::MemberPointerType *MPT); +}; + +} // namespace llvm::abi + +#endif // LLVM_CODEGEN_ABITYPEMAPPER_H diff --git a/llvm/include/llvm/ABI/TargetCodegenInfo.h b/llvm/include/llvm/ABI/TargetCodegenInfo.h new file mode 100644 index 0000000000000..e799f8ca87e2e --- /dev/null +++ b/llvm/include/llvm/ABI/TargetCodegenInfo.h @@ -0,0 +1,74 @@ +//===----- TargetCodeGenInfo.h ------------------------------------ C++ ---===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ABI_TARGETCODEGENINFO_H +#define LLVM_ABI_TARGETCODEGENINFO_H + +#include "llvm/ABI/ABIInfo.h" +#include "llvm/TargetParser/Triple.h" +#include <memory> + +namespace llvm::abi { + +class TargetCodeGenInfo { + std::unique_ptr<ABIInfo> Info; + +protected: + template <typename T> const T &getABIInfo() const { + return static_cast<const T &>(*Info); + } + +public: + TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info) + : Info(std::move(Info)) {} + + virtual ~TargetCodeGenInfo() = default; + + const ABIInfo &getABIInfo() const { return *Info; } +}; + +std::unique_ptr<TargetCodeGenInfo> +createDefaultTargetCodeGenInfo(TypeBuilder &TB); + +std::unique_ptr<TargetCodeGenInfo> createBPFTargetCodeGenInfo(TypeBuilder &TB); + +/// The AVX ABI level for X86 targets.
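+/// The level bounds the widest vector the ABI will pass in vector
+/// registers: 128 bits for None, 256 for AVX, and 512 for AVX512 (see
+/// getNativeVectorSizeForAVXABI() in Targets/X86.cpp).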
+enum class X86AVXABILevel { + None, + AVX, + AVX512, +}; + +std::unique_ptr<TargetCodeGenInfo> +createX8664TargetCodeGenInfo(TypeBuilder &TB, const Triple &Triple, + X86AVXABILevel AVXLevel, bool Has64BitPointers, + const ABICompatInfo &Compat); +std::unique_ptr<TargetCodeGenInfo> +createAArch64TargetCodeGenInfo(TypeBuilder &TB); + +std::unique_ptr<TargetCodeGenInfo> createARMTargetCodeGenInfo(TypeBuilder &TB); + +std::unique_ptr<TargetCodeGenInfo> +createRISCVTargetCodeGenInfo(TypeBuilder &TB); + +std::unique_ptr<TargetCodeGenInfo> +createPPC64TargetCodeGenInfo(TypeBuilder &TB); + +std::unique_ptr<TargetCodeGenInfo> +createSystemZTargetCodeGenInfo(TypeBuilder &TB); + +std::unique_ptr<TargetCodeGenInfo> +createWebAssemblyTargetCodeGenInfo(TypeBuilder &TB); + +std::unique_ptr<TargetCodeGenInfo> +createNVPTXTargetCodeGenInfo(TypeBuilder &TB); + +std::unique_ptr<TargetCodeGenInfo> +createAMDGPUTargetCodeGenInfo(TypeBuilder &TB); +} // namespace llvm::abi + +#endif // LLVM_ABI_TARGETCODEGENINFO_H diff --git a/llvm/include/llvm/ABI/Types.h b/llvm/include/llvm/ABI/Types.h new file mode 100644 index 0000000000000..dc21167173ab4 --- /dev/null +++ b/llvm/include/llvm/ABI/Types.h @@ -0,0 +1,531 @@ +//===- ABI/Types.h ----------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines the types and related helper methods of the LLVMABI +/// library, which mirrors ABI-related type information from the LLVM +/// frontend. +/// +//===----------------------------------------------------------------------===// +#ifndef LLVM_ABI_TYPES_H +#define LLVM_ABI_TYPES_H + +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/Alignment.h" +#include "llvm/Support/Allocator.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/TypeSize.h" +#include <cstdint> +#include <new> + +namespace llvm { +namespace abi { + +enum class TypeKind { + Void, + MemberPointer, + Complex, + Integer, + Float, + Pointer, + Array, + Vector, + Record, +}; + +class Type { +private: + TypeSize getTypeStoreSize() const { + TypeSize StoreSizeInBits = getTypeStoreSizeInBits(); + return {StoreSizeInBits.getKnownMinValue() / 8, + StoreSizeInBits.isScalable()}; + } + TypeSize getTypeStoreSizeInBits() const { + TypeSize BaseSize = getSizeInBits(); + uint64_t AlignedSizeInBits = + alignToPowerOf2(BaseSize.getKnownMinValue(), 8); + return {AlignedSizeInBits, BaseSize.isScalable()}; + } + +protected: + TypeKind Kind; + TypeSize SizeInBits; + Align ABIAlignment; + + Type(TypeKind K, TypeSize SizeInBits, Align ABIAlign) + : Kind(K), SizeInBits(SizeInBits), ABIAlignment(ABIAlign) {} + +public: + TypeKind getKind() const { return Kind; } + TypeSize getSizeInBits() const { return SizeInBits; } + Align getAlignment() const { return ABIAlignment; } + + TypeSize getTypeAllocSize() const { + return alignTo(getTypeStoreSize(), getAlignment().value()); + } + + bool isVoid() const { return Kind == TypeKind::Void; } + bool isInteger() const { return Kind == TypeKind::Integer; } + bool isFloat() const { return Kind == TypeKind::Float; } + bool isPointer() const { return Kind == TypeKind::Pointer; } + bool isArray() const { return Kind == TypeKind::Array; } + bool isVector() const { return Kind == TypeKind::Vector; } + bool isRecord() const { return Kind == TypeKind::Record; } + bool isMemberPointer() const { return Kind == TypeKind::MemberPointer; } + bool isComplex() const {
return Kind == TypeKind::Complex; } +}; + +class VoidType : public Type { +public: + VoidType() : Type(TypeKind::Void, TypeSize::getFixed(0), Align(1)) {} + + static bool classof(const Type *T) { return T->getKind() == TypeKind::Void; } +}; + +class ComplexType : public Type { +public: + ComplexType(const Type *ElementType, uint64_t SizeInBits, Align Alignment) + : Type(TypeKind::Complex, TypeSize::getFixed(SizeInBits), Alignment), + ElementType(ElementType) {} + + const Type *getElementType() const { return ElementType; } + + static bool classof(const Type *T) { + return T->getKind() == TypeKind::Complex; + } + +private: + const Type *ElementType; +}; + +class IntegerType : public Type { +private: + bool IsSigned; + bool IsBitInt; + +public: + IntegerType(uint64_t BitWidth, Align ABIAlign, bool IsSigned, + bool IsBitInt = false) + : Type(TypeKind::Integer, TypeSize::getFixed(BitWidth), ABIAlign), + IsSigned(IsSigned), IsBitInt(IsBitInt) {} + + bool isSigned() const { return IsSigned; } + bool isBitInt() const { return IsBitInt; } + bool isBool() const { + return getSizeInBits().getFixedValue() == 1 && !IsBitInt; + } + + static bool classof(const Type *T) { + return T->getKind() == TypeKind::Integer; + } +}; + +class FloatType : public Type { +private: + const fltSemantics *Semantics; + +public: + FloatType(const fltSemantics &FloatSemantics, Align ABIAlign) + : Type(TypeKind::Float, + TypeSize::getFixed(APFloat::getSizeInBits(FloatSemantics)), + ABIAlign), + Semantics(&FloatSemantics) {} + + const fltSemantics *getSemantics() const { return Semantics; } + static bool classof(const Type *T) { return T->getKind() == TypeKind::Float; } +}; + +class PointerLikeType : public Type { +protected: + unsigned AddrSpace; + PointerLikeType(TypeKind K, TypeSize SizeInBits, Align ABIAlign, unsigned AS) + : Type(K, SizeInBits, ABIAlign), AddrSpace(AS) {} + +public: + unsigned getAddrSpace() const { return AddrSpace; } + bool isMemberPointer() const { return getKind() == TypeKind::MemberPointer; } + + static bool classof(const Type *T) { + return T->getKind() == TypeKind::Pointer || + T->getKind() == TypeKind::MemberPointer; + } +}; + +class PointerType : public PointerLikeType { +public: + PointerType(uint64_t Size, Align ABIAlign, unsigned AddressSpace = 0) + : PointerLikeType(TypeKind::Pointer, TypeSize::getFixed(Size), ABIAlign, + AddressSpace) {} + + static bool classof(const Type *T) { + return T->getKind() == TypeKind::Pointer; + } +}; + +class MemberPointerType : public PointerLikeType { +private: + bool IsFunctionPointer; + +public: + MemberPointerType(bool IsFunctionPointer, uint64_t SizeInBits, Align ABIAlign, + unsigned AddressSpace = 0) + : PointerLikeType(TypeKind::MemberPointer, TypeSize::getFixed(SizeInBits), + ABIAlign, AddressSpace), + IsFunctionPointer(IsFunctionPointer) {} + bool isFunctionPointer() const { return IsFunctionPointer; } + + static bool classof(const Type *T) { + return T->getKind() == TypeKind::MemberPointer; + } +}; + +class ArrayType : public Type { +private: + const Type *ElementType; + uint64_t NumElements; + bool IsMatrix; + +public: + ArrayType(const Type *ElementType, uint64_t NumElements, uint64_t SizeInBits, + bool IsMatrixType = false) + : Type(TypeKind::Array, TypeSize::getFixed(SizeInBits), + ElementType->getAlignment()), + ElementType(ElementType), NumElements(NumElements), + IsMatrix(IsMatrixType) {} + + const Type *getElementType() const { return ElementType; } + uint64_t getNumElements() const { return NumElements; } + bool isMatrixType() const { return 
IsMatrix; } + + static bool classof(const Type *T) { return T->getKind() == TypeKind::Array; } +}; + +class VectorType : public Type { +private: + const Type *ElementType; + ElementCount NumElements; + +public: + VectorType(const Type *ElementType, ElementCount NumElements, Align ABIAlign) + : Type(TypeKind::Vector, + TypeSize(ElementType->getSizeInBits().getFixedValue() * + NumElements.getKnownMinValue(), + NumElements.isScalable()), + ABIAlign), + ElementType(ElementType), NumElements(NumElements) {} + + const Type *getElementType() const { return ElementType; } + ElementCount getNumElements() const { return NumElements; } + + static bool classof(const Type *T) { + return T->getKind() == TypeKind::Vector; + } +}; + +struct FieldInfo { + const Type *FieldType; + uint64_t OffsetInBits; + uint64_t BitFieldWidth; + bool IsBitField; + bool IsUnnamedBitfield; + + FieldInfo(const Type *FieldType, uint64_t OffsetInBits = 0, + bool IsBitField = false, uint64_t BitFieldWidth = 0, + bool IsUnnamedBitField = false) + : FieldType(FieldType), OffsetInBits(OffsetInBits), + BitFieldWidth(BitFieldWidth), IsBitField(IsBitField), + IsUnnamedBitfield(IsUnnamedBitField) {} +}; + +enum class StructPacking { Default, Packed, ExplicitPacking }; + +class RecordType : public Type { +private: + ArrayRef<FieldInfo> Fields; + ArrayRef<FieldInfo> BaseClasses; + ArrayRef<FieldInfo> VirtualBaseClasses; + StructPacking Packing; + bool CanPassInRegisters; + bool IsCoercedRecord; + bool IsUnion; + bool IsTransparent; + + bool IsCXXRecord; + bool IsPolymorphic; + bool HasNonTrivialCopyConstructor; + bool HasNonTrivialDestructor; + bool HasFlexibleArrayMember; + bool HasUnalignedFields; + +public: + RecordType(ArrayRef<FieldInfo> StructFields, ArrayRef<FieldInfo> Bases, + ArrayRef<FieldInfo> VBases, TypeSize Size, Align Align, + StructPacking Pack = StructPacking::Default, bool Union = false, + bool CXXRecord = false, bool Polymorphic = false, + bool NonTrivialCopy = false, bool NonTrivialDtor = false, + bool FlexibleArray = false, bool UnalignedFields = false, + bool CanPassInRegs = false, bool IsCoercedRec = false, + bool Transparent = false) + : Type(TypeKind::Record, Size, Align), Fields(StructFields), + BaseClasses(Bases), VirtualBaseClasses(VBases), Packing(Pack), + CanPassInRegisters(CanPassInRegs), IsCoercedRecord(IsCoercedRec), + IsUnion(Union), IsTransparent(Transparent), IsCXXRecord(CXXRecord), + IsPolymorphic(Polymorphic), + HasNonTrivialCopyConstructor(NonTrivialCopy), + HasNonTrivialDestructor(NonTrivialDtor), + HasFlexibleArrayMember(FlexibleArray), + HasUnalignedFields(UnalignedFields) {} + + uint32_t getNumFields() const { return Fields.size(); } + StructPacking getPacking() const { return Packing; } + + bool isUnion() const { return IsUnion; } + bool isCXXRecord() const { return IsCXXRecord; } + bool isPolymorphic() const { return IsPolymorphic; } + bool hasNonTrivialCopyConstructor() const { + return HasNonTrivialCopyConstructor; + } + bool isCoercedRecord() const { return IsCoercedRecord; } + bool canPassInRegisters() const { return CanPassInRegisters; } + bool hasNonTrivialDestructor() const { return HasNonTrivialDestructor; } + bool hasFlexibleArrayMember() const { return HasFlexibleArrayMember; } + bool hasUnalignedFields() const { return HasUnalignedFields; } + + uint32_t getNumBaseClasses() const { return BaseClasses.size(); } + uint32_t getNumVirtualBaseClasses() const { + return VirtualBaseClasses.size(); + } + bool isTransparentUnion() const { return IsTransparent; } + ArrayRef<FieldInfo> getFields() const { return Fields; } + ArrayRef<FieldInfo> getBaseClasses() const { return
BaseClasses; } + ArrayRef<FieldInfo> getVirtualBaseClasses() const { + return VirtualBaseClasses; + } + + static bool isEmptyForABI(const llvm::abi::Type *Ty) { + const auto *RT = dyn_cast<RecordType>(Ty); + if (!RT) + return false; + + for (const auto &Field : RT->getFields()) { + if (!Field.IsUnnamedBitfield) + return false; + } + + if (RT->isCXXRecord()) { + for (const auto &Base : RT->getBaseClasses()) { + if (!isEmptyForABI(Base.FieldType)) + return false; + } + + for (const auto &VBase : RT->getVirtualBaseClasses()) { + if (!isEmptyForABI(VBase.FieldType)) + return false; + } + } + + return true; + } + + const FieldInfo *getElementContainingOffset(unsigned OffsetInBits) const { + SmallVector<std::pair<unsigned, const FieldInfo *>> AllElements; + + for (const auto &Base : BaseClasses) { + if (!isEmptyForABI(Base.FieldType)) + AllElements.emplace_back(Base.OffsetInBits, &Base); + } + + for (const auto &VBase : VirtualBaseClasses) { + if (!isEmptyForABI(VBase.FieldType)) + AllElements.emplace_back(VBase.OffsetInBits, &VBase); + } + + for (const auto &Field : Fields) { + if (Field.IsUnnamedBitfield) + continue; + AllElements.emplace_back(Field.OffsetInBits, &Field); + } + + llvm::stable_sort(AllElements, [](const auto &A, const auto &B) { + return A.first < B.first; + }); + + auto *It = llvm::upper_bound(AllElements, OffsetInBits, + [](unsigned Offset, const auto &Element) { + return Offset < Element.first; + }); + + if (It == AllElements.begin()) + return nullptr; + + --It; + + const FieldInfo *Candidate = It->second; + unsigned ElementStart = It->first; + unsigned ElementSize = + Candidate->FieldType->getSizeInBits().getFixedValue(); + + if (OffsetInBits >= ElementStart && + OffsetInBits < ElementStart + ElementSize) + return Candidate; + + return nullptr; + } + static bool classof(const Type *T) { + return T->getKind() == TypeKind::Record; + } +}; + +/// API for creating ABI Types +class TypeBuilder { +private: + BumpPtrAllocator &Allocator; + +public: + explicit TypeBuilder(BumpPtrAllocator &Alloc) : Allocator(Alloc) {} + + const VoidType *getVoidType() { + return new (Allocator.Allocate<VoidType>()) VoidType(); + } + + const IntegerType *getIntegerType(uint64_t BitWidth, Align Align, bool Signed, + bool IsBitInt = false) { + return new (Allocator.Allocate<IntegerType>()) + IntegerType(BitWidth, Align, Signed, IsBitInt); + } + + const FloatType *getFloatType(const fltSemantics &Semantics, Align Align) { + return new (Allocator.Allocate<FloatType>()) FloatType(Semantics, Align); + } + + const PointerType *getPointerType(uint64_t Size, Align Align, + unsigned Addrspace = 0) { + return new (Allocator.Allocate<PointerType>()) + PointerType(Size, Align, Addrspace); + } + + const ArrayType *getArrayType(const Type *ElementType, uint64_t NumElements, + uint64_t SizeInBits, + bool IsMatrixType = false) { + return new (Allocator.Allocate<ArrayType>()) + ArrayType(ElementType, NumElements, SizeInBits, IsMatrixType); + } + + const VectorType *getVectorType(const Type *ElementType, + ElementCount NumElements, Align Align) { + return new (Allocator.Allocate<VectorType>()) + VectorType(ElementType, NumElements, Align); + } + + // TODO: clean up this function + const RecordType * + getRecordType(ArrayRef<FieldInfo> Fields, TypeSize Size, Align Align, + StructPacking Pack = StructPacking::Default, + ArrayRef<FieldInfo> BaseClasses = {}, + ArrayRef<FieldInfo> VirtualBaseClasses = {}, + bool CXXRecord = false, bool Polymorphic = false, + bool NonTrivialCopy = false, bool NonTrivialDtor = false, + bool FlexibleArray = false, bool UnalignedFields = false, + bool CanPassInRegister = false) { + FieldInfo *FieldArray = Allocator.Allocate<FieldInfo>(Fields.size()); +
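+    // Copy the caller's fields into allocator-owned storage; the ArrayRefs
+    // held by the returned RecordType must stay valid after the caller's
+    // buffers go away.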
std::copy(Fields.begin(), Fields.end(), FieldArray); + + FieldInfo *BaseArray = nullptr; + if (!BaseClasses.empty()) { + BaseArray = Allocator.Allocate<FieldInfo>(BaseClasses.size()); + std::copy(BaseClasses.begin(), BaseClasses.end(), BaseArray); + } + + FieldInfo *VBaseArray = nullptr; + if (!VirtualBaseClasses.empty()) { + VBaseArray = Allocator.Allocate<FieldInfo>(VirtualBaseClasses.size()); + std::copy(VirtualBaseClasses.begin(), VirtualBaseClasses.end(), + VBaseArray); + } + + ArrayRef<FieldInfo> FieldsRef(FieldArray, Fields.size()); + ArrayRef<FieldInfo> BasesRef(BaseArray, BaseClasses.size()); + ArrayRef<FieldInfo> VBasesRef(VBaseArray, VirtualBaseClasses.size()); + + return new (Allocator.Allocate<RecordType>()) + RecordType(FieldsRef, BasesRef, VBasesRef, Size, Align, Pack, false, + CXXRecord, Polymorphic, NonTrivialCopy, NonTrivialDtor, + FlexibleArray, UnalignedFields, CanPassInRegister); + } + + /// Creates a coerced record type for ABI purposes. + /// + /// Coerced record types are artificial struct representations used internally + /// by the ABI layer to represent non-aggregate types in a convenient way. + /// For example, a function argument that needs to be passed in two registers + /// might be coerced into a struct with two fields: {i64, i32}. + /// + /// \param Fields The fields of the coerced struct + /// \param Size Total size in bits + /// \param Align Alignment requirements + /// \param Pack Struct packing mode (usually Default) + /// \returns A RecordType marked as coerced for ABI purposes + const RecordType * + getCoercedRecordType(ArrayRef<FieldInfo> Fields, TypeSize Size, Align Align, + StructPacking Pack = StructPacking::Default) { + FieldInfo *FieldArray = Allocator.Allocate<FieldInfo>(Fields.size()); + std::copy(Fields.begin(), Fields.end(), FieldArray); + + ArrayRef<FieldInfo> FieldsRef(FieldArray, Fields.size()); + + return new (Allocator.Allocate<RecordType>()) RecordType( + FieldsRef, ArrayRef<FieldInfo>(), ArrayRef<FieldInfo>(), Size, Align, + Pack, false, false, false, false, false, false, false, true, true); + } + + const RecordType *getUnionType(ArrayRef<FieldInfo> Fields, TypeSize Size, + Align Align, + StructPacking Pack = StructPacking::Default, + bool IsTransparent = false, + bool CanPassInRegs = false, + bool CXXRecord = false) { + FieldInfo *FieldArray = Allocator.Allocate<FieldInfo>(Fields.size()); + + for (size_t I = 0; I < Fields.size(); ++I) { + const FieldInfo &Field = Fields[I]; + new (&FieldArray[I]) + FieldInfo(Field.FieldType, 0, Field.IsBitField, Field.BitFieldWidth, + Field.IsUnnamedBitfield); + } + + ArrayRef<FieldInfo> FieldsRef(FieldArray, Fields.size()); + + return new (Allocator.Allocate<RecordType>()) + RecordType(FieldsRef, ArrayRef<FieldInfo>(), ArrayRef<FieldInfo>(), + Size, Align, Pack, true, CXXRecord, false, false, false, + false, false, CanPassInRegs, false, IsTransparent); + } + + const ComplexType *getComplexType(const Type *ElementType, Align Align) { + // Complex types have two elements (real and imaginary parts) + uint64_t ElementSize = ElementType->getSizeInBits().getFixedValue(); + uint64_t ComplexSize = ElementSize * 2; + + return new (Allocator.Allocate<ComplexType>()) + ComplexType(ElementType, ComplexSize, Align); + } + + const MemberPointerType *getMemberPointerType(bool IsFunctionPointer, + uint64_t SizeInBits, + Align Align) { + return new (Allocator.Allocate<MemberPointerType>()) + MemberPointerType(IsFunctionPointer, SizeInBits, Align); + } +}; + +} // namespace abi +} // namespace llvm + +#endif // LLVM_ABI_TYPES_H diff --git a/llvm/lib/ABI/ABIFunctionInfo.cpp b/llvm/lib/ABI/ABIFunctionInfo.cpp new file mode 100644 index 0000000000000..eed444e9837b3 --- /dev/null +++ b/llvm/lib/ABI/ABIFunctionInfo.cpp @@ -0,0 +1,40 @@ +//===----- ABIFunctionInfo.cpp - ABI Function Information --------- C++ ---===//
+// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/ABI/ABIFunctionInfo.h" + +using namespace llvm; +using namespace llvm::abi; + +ABIFunctionInfo *ABIFunctionInfo::create(CallingConv::ID CC, + const Type *ReturnType, + ArrayRef<const Type *> ArgTypes, + const ABICallAttributes &CallAttrs, + RequiredArgs Required) { + + assert(!Required.allowsOptionalArgs() || + Required.getNumRequiredArgs() <= ArgTypes.size()); + + void *Buffer = operator new( + totalSizeToAlloc<ArgInfo>(ArgTypes.size())); + + ABIFunctionInfo *FI = + new (Buffer) ABIFunctionInfo(ReturnType, ArgTypes.size()); + + FI->CallAttrs = CallAttrs; + FI->CallAttrs.CC = CC; + FI->Required = Required; + + auto Args = FI->arguments(); + for (unsigned I = 0; I < ArgTypes.size(); ++I) { + Args[I].ABIType = ArgTypes[I]; + Args[I].ArgInfo = ABIArgInfo::getDirect(); + } + + return FI; +} diff --git a/llvm/lib/ABI/ABIInfo.cpp b/llvm/lib/ABI/ABIInfo.cpp new file mode 100644 index 0000000000000..48765eb4c8188 --- /dev/null +++ b/llvm/lib/ABI/ABIInfo.cpp @@ -0,0 +1,99 @@ +//===----- ABIInfo.cpp ------------------------------------------- C++ ---===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "llvm/ABI/ABIInfo.h" + +using namespace llvm::abi; +bool ABIInfo::isAggregateTypeForABI(const Type *Ty) const { + // Check for fundamental scalar types + if (Ty->isInteger() || Ty->isFloat() || Ty->isPointer() || Ty->isVector()) + return false; + + // Everything else is treated as aggregate + return true; +} + +bool ABIInfo::isPromotableInteger(const IntegerType *IT) const { + unsigned BitWidth = IT->getSizeInBits().getFixedValue(); + return BitWidth < 32; +} + +// Create indirect return with natural alignment +ABIArgInfo ABIInfo::getNaturalAlignIndirect(const Type *Ty, bool ByVal) const { + return ABIArgInfo::getIndirect(Ty->getAlignment().value(), ByVal); +} +RecordArgABI ABIInfo::getRecordArgABI(const RecordType *RT) const { + if (RT && !RT->canPassInRegisters()) + return RAA_Indirect; + return RAA_Default; +} + +RecordArgABI ABIInfo::getRecordArgABI(const RecordType *RT, + bool IsCxxRecord) const { + if (!IsCxxRecord) { + if (!RT->canPassInRegisters()) + return RAA_Indirect; + return RAA_Default; + } + return getRecordArgABI(RT); +} + +RecordArgABI ABIInfo::getRecordArgABI(const Type *Ty) const { + const RecordType *RT = dyn_cast<RecordType>(Ty); + if (!RT) + return RAA_Default; + return getRecordArgABI(RT, RT->isCXXRecord()); +} + +bool ABIInfo::isZeroSizedType(const Type *Ty) const { + return Ty->getSizeInBits().getFixedValue() == 0; +} + +bool ABIInfo::isEmptyRecord(const RecordType *RT) const { + if (RT->hasFlexibleArrayMember() || RT->isPolymorphic() || + RT->getNumVirtualBaseClasses() != 0) + return false; + + for (unsigned I = 0; I < RT->getNumBaseClasses(); ++I) { + const Type *BaseTy = RT->getBaseClasses()[I].FieldType; + auto *BaseRT = dyn_cast<RecordType>(BaseTy); + if (!BaseRT || !isEmptyRecord(BaseRT)) + return false; + } + + for (unsigned I = 0; I < RT->getNumFields(); ++I) { + const FieldInfo &FI = RT->getFields()[I]; + + if
(FI.IsBitField && FI.BitFieldWidth == 0) + continue; + if (FI.IsUnnamedBitfield) + continue; + + if (!isZeroSizedType(FI.FieldType)) + return false; + } + return true; +} + +bool ABIInfo::isEmptyField(const FieldInfo &FI) const { + if (FI.IsUnnamedBitfield) + return true; + if (FI.IsBitField && FI.BitFieldWidth == 0) + return true; + + const Type *Ty = FI.FieldType; + while (auto *AT = dyn_cast<ArrayType>(Ty)) { + if (AT->getNumElements() != 1) + break; + Ty = AT->getElementType(); + } + + if (auto *RT = dyn_cast<RecordType>(Ty)) + return isEmptyRecord(RT); + + return isZeroSizedType(Ty); +} diff --git a/llvm/lib/ABI/ABITypeMapper.cpp b/llvm/lib/ABI/ABITypeMapper.cpp new file mode 100644 index 0000000000000..c38cf42d2e0df --- /dev/null +++ b/llvm/lib/ABI/ABITypeMapper.cpp @@ -0,0 +1,208 @@ +//===---- ABITypeMapper.cpp - Maps LLVM ABI Types to LLVM IR Types ------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// Maps LLVM ABI type representations to corresponding LLVM IR types. +/// This reverse mapper translates low-level ABI-specific types back into +/// LLVM IR types suitable for code generation and optimization passes. +/// +//===----------------------------------------------------------------------===// + +#include "llvm/ABI/ABITypeMapper.h" +#include "llvm/ABI/Types.h" +#include "llvm/ADT/APFloat.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Type.h" + +using namespace llvm::abi; + +llvm::Type *ABITypeMapper::convertType(const abi::Type *ABIType) { + if (!ABIType) + return nullptr; + + auto It = TypeCache.find(ABIType); + if (It != TypeCache.end()) + return It->second; + + llvm::Type *Result = nullptr; + + switch (ABIType->getKind()) { + case abi::TypeKind::Integer: { + const auto *IT = cast<abi::IntegerType>(ABIType); + unsigned Bitwidth = IT->getSizeInBits().getFixedValue(); + Result = llvm::IntegerType::get(Context, Bitwidth); + break; + } + case abi::TypeKind::Float: { + const fltSemantics *Semantics = + cast<abi::FloatType>(ABIType)->getSemantics(); + Result = llvm::Type::getFloatingPointTy(Context, *Semantics); + break; + } + case abi::TypeKind::Pointer: + Result = llvm::PointerType::get( + Context, cast<abi::PointerType>(ABIType)->getAddrSpace()); + break; + case abi::TypeKind::Array: + Result = convertArrayType(cast<abi::ArrayType>(ABIType)); + break; + case abi::TypeKind::Vector: + Result = convertVectorType(cast<abi::VectorType>(ABIType)); + break; + case abi::TypeKind::Record: + Result = convertRecordType(cast<abi::RecordType>(ABIType)); + break; + case abi::TypeKind::Void: + Result = llvm::Type::getVoidTy(Context); + break; + case abi::TypeKind::Complex: + Result = convertComplexType(cast<abi::ComplexType>(ABIType)); + break; + case abi::TypeKind::MemberPointer: + Result = convertMemberPointerType(cast<abi::MemberPointerType>(ABIType)); + break; + } + + if (Result) + TypeCache[ABIType] = Result; + + return Result; +} + +llvm::Type *ABITypeMapper::convertArrayType(const abi::ArrayType *AT) { + llvm::Type *ElementType = convertType(AT->getElementType()); + if (!ElementType) + return nullptr; + + uint64_t NumElements = AT->getNumElements(); + if (AT->isMatrixType()) + return llvm::VectorType::get(ElementType, + ElementCount::getFixed(NumElements)); + + return llvm::ArrayType::get(ElementType, NumElements); +} + +llvm::Type *ABITypeMapper::convertVectorType(const abi::VectorType *VT) { + llvm::Type *ElementType =
convertType(VT->getElementType()); + if (!ElementType) + return nullptr; + + ElementCount EC = VT->getNumElements(); + return llvm::VectorType::get(ElementType, EC); +} + +llvm::Type *ABITypeMapper::convertRecordType(const abi::RecordType *RT) { + ArrayRef<FieldInfo> FieldsArray = RT->getFields(); + return createStructFromFields(FieldsArray, RT->getSizeInBits(), + RT->getAlignment(), RT->isUnion(), + RT->isCoercedRecord()); +} + +llvm::Type *ABITypeMapper::convertComplexType(const abi::ComplexType *CT) { + // Complex types are represented as structs with two elements: {real, imag} + llvm::Type *ElementType = convertType(CT->getElementType()); + if (!ElementType) + return nullptr; + + SmallVector<llvm::Type *, 2> Fields = {ElementType, ElementType}; + return llvm::StructType::get(Context, Fields, /*isPacked=*/false); +} + +llvm::Type * +ABITypeMapper::convertMemberPointerType(const abi::MemberPointerType *MPT) { + if (MPT->isFunctionPointer()) { + llvm::Type *IntPtrTy = DL.getIntPtrType(Context); + SmallVector<llvm::Type *, 2> Fields = {IntPtrTy, IntPtrTy}; + return llvm::StructType::get(Context, Fields, /*isPacked=*/false); + } + return DL.getIntPtrType(Context); +} + +llvm::Type *ABITypeMapper::createPaddingType(uint64_t PaddingBits) { + if (PaddingBits == 0) + return nullptr; + + if (PaddingBits % 8 == 0) { + llvm::Type *ByteType = llvm::IntegerType::get(Context, 8); + return llvm::ArrayType::get(ByteType, PaddingBits / 8); + } + return llvm::IntegerType::get(Context, PaddingBits); +} + +llvm::StructType * +ABITypeMapper::createStructFromFields(ArrayRef<FieldInfo> Fields, + TypeSize Size, Align Alignment, + bool IsUnion, bool IsCoercedStr) { + SmallVector<llvm::Type *> FieldTypes; + + if (IsUnion) { + llvm::Type *LargestFieldType = nullptr; + uint64_t LargestFieldSize = 0; + + for (const auto &Field : Fields) { + llvm::Type *FieldType = convertType(Field.FieldType); + if (!FieldType) + continue; + + uint64_t FieldSize = DL.getTypeSizeInBits(FieldType); + if (FieldSize > LargestFieldSize) { + LargestFieldSize = FieldSize; + LargestFieldType = FieldType; + } + } + + if (LargestFieldType) { + FieldTypes.push_back(LargestFieldType); + + uint64_t UnionSizeBits = Size.getFixedValue(); + if (LargestFieldSize < UnionSizeBits) { + llvm::Type *PaddingType = + createPaddingType(UnionSizeBits - LargestFieldSize); + if (PaddingType) + FieldTypes.push_back(PaddingType); + } + } + } else { + uint64_t CurrentOffset = 0; + + for (const auto &Field : Fields) { + if (!IsCoercedStr && Field.OffsetInBits > CurrentOffset) { + llvm::Type *PaddingType = + createPaddingType(Field.OffsetInBits - CurrentOffset); + if (PaddingType) + FieldTypes.push_back(PaddingType); + CurrentOffset = Field.OffsetInBits; + } + + llvm::Type *FieldType = convertType(Field.FieldType); + if (!FieldType) + continue; + + if (Field.IsBitField && Field.BitFieldWidth > 0) { + FieldType = llvm::IntegerType::get(Context, Field.BitFieldWidth); + CurrentOffset += Field.BitFieldWidth; + } else { + FieldTypes.push_back(FieldType); + CurrentOffset += DL.getTypeSizeInBits(FieldType); + } + } + + if (!IsCoercedStr) { + uint64_t TotalSizeBits = Size.getFixedValue(); + if (CurrentOffset < TotalSizeBits) { + llvm::Type *PaddingType = + createPaddingType(TotalSizeBits - CurrentOffset); + if (PaddingType) + FieldTypes.push_back(PaddingType); + } + } + } + + return StructType::get(Context, FieldTypes, /*isPacked=*/false); +} diff --git a/llvm/lib/ABI/CMakeLists.txt b/llvm/lib/ABI/CMakeLists.txt new file mode 100644 index 0000000000000..7a70bf3e7b810 --- /dev/null +++ b/llvm/lib/ABI/CMakeLists.txt @@ -0,0 +1,17 @@
+add_llvm_component_library(LLVMABI + ABIFunctionInfo.cpp + ABITypeMapper.cpp + ABIInfo.cpp + Targets/BPF.cpp + Targets/X86.cpp + + ADDITIONAL_HEADER_DIRS + ${LLVM_MAIN_INCLUDE_DIR}/llvm/ABI + + DEPENDS + intrinsics_gen + + LINK_COMPONENTS + Core + Support +) diff --git a/llvm/lib/ABI/Targets/BPF.cpp b/llvm/lib/ABI/Targets/BPF.cpp new file mode 100644 index 0000000000000..90dd52fd99683 --- /dev/null +++ b/llvm/lib/ABI/Targets/BPF.cpp @@ -0,0 +1,94 @@ +//===- BPF.cpp - BPF ABI Implementation ----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/ABI/ABIFunctionInfo.h" +#include "llvm/ABI/ABIInfo.h" +#include "llvm/ABI/TargetCodegenInfo.h" +#include "llvm/ABI/Types.h" +#include "llvm/Support/Alignment.h" +#include "llvm/Support/Casting.h" + +namespace llvm::abi { + +class BPFABIInfo : public ABIInfo { +private: + TypeBuilder &TB; + +public: + BPFABIInfo(TypeBuilder &TypeBuilder) : TB(TypeBuilder) {} + + ABIArgInfo classifyReturnType(const Type *RetTy) const { + if (RetTy->isVoid()) + return ABIArgInfo::getIgnore(); + + if (isAggregateTypeForABI(RetTy)) { + auto SizeInBits = RetTy->getSizeInBits().getFixedValue(); + if (SizeInBits == 0) + return ABIArgInfo::getIgnore(); + return ABIArgInfo::getIndirect(RetTy->getAlignment().value()); + } + + if (const auto *IntTy = dyn_cast<IntegerType>(RetTy)) { + if (IntTy->isBitInt() && IntTy->getSizeInBits().getFixedValue() > 128) + return ABIArgInfo::getIndirect(RetTy->getAlignment().value()); + } + + return ABIArgInfo::getDirect(); + } + + ABIArgInfo classifyArgumentType(const Type *ArgTy) const { + if (isAggregateTypeForABI(ArgTy)) { + auto SizeInBits = ArgTy->getSizeInBits().getFixedValue(); + if (SizeInBits == 0) + return ABIArgInfo::getIgnore(); + + if (SizeInBits <= 128) { + const Type *CoerceTy; + if (SizeInBits <= 64) { + auto AlignedBits = alignTo(SizeInBits, 8); + CoerceTy = TB.getIntegerType(AlignedBits, Align(8), false); + } else { + const Type *RegTy = TB.getIntegerType(64, Align(8), false); + CoerceTy = TB.getArrayType(RegTy, 2, 128); + } + return ABIArgInfo::getDirect(CoerceTy); + } + + return ABIArgInfo::getIndirect(ArgTy->getAlignment().value()); + } + + if (const auto *IntTy = dyn_cast<IntegerType>(ArgTy)) { + auto BitWidth = IntTy->getSizeInBits().getFixedValue(); + if (IntTy->isBitInt() && BitWidth > 128) + return ABIArgInfo::getIndirect(ArgTy->getAlignment().value()); + + if (isPromotableInteger(IntTy)) + return ABIArgInfo::getExtend(ArgTy); + } + return ABIArgInfo::getDirect(); + } + + void computeInfo(ABIFunctionInfo &FI) const override { + FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); + for (auto &I : FI.arguments()) { + I.ArgInfo = classifyArgumentType(I.ABIType); + } + } +}; + +class BPFTargetCodeGenInfo : public TargetCodeGenInfo { +public: + BPFTargetCodeGenInfo(TypeBuilder &TB) + : TargetCodeGenInfo(std::make_unique<BPFABIInfo>(TB)) {} +}; + +std::unique_ptr<TargetCodeGenInfo> createBPFTargetCodeGenInfo(TypeBuilder &TB) { + return std::make_unique<BPFTargetCodeGenInfo>(TB); +} + +} // namespace llvm::abi diff --git a/llvm/lib/ABI/Targets/X86.cpp b/llvm/lib/ABI/Targets/X86.cpp new file mode 100644 index 0000000000000..009a56701e706 --- /dev/null +++ b/llvm/lib/ABI/Targets/X86.cpp @@ -0,0 +1,1458 @@ +//===- X86.cpp ------------------------------------------------------------===// +// +// Part of the
LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/ABI/ABIFunctionInfo.h" +#include "llvm/ABI/ABIInfo.h" +#include "llvm/ABI/TargetCodegenInfo.h" +#include "llvm/ABI/Types.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/Alignment.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/TypeSize.h" +#include "llvm/TargetParser/Triple.h" +#include +#include +#include + +namespace llvm { +namespace abi { + +static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { + switch (AVXLevel) { + case X86AVXABILevel::AVX512: + return 512; + case X86AVXABILevel::AVX: + return 256; + case X86AVXABILevel::None: + return 128; + } + llvm_unreachable("Unknown AVXLevel"); +} + +class X86_64ABIInfo : public ABIInfo { +public: + enum Class { + Integer = 0, + SSE, + SSEUp, + X87, + X87UP, + Complex_X87, + NoClass, + Memory + }; + +private: + TypeBuilder &TB; + X86AVXABILevel AVXLevel; + bool Has64BitPointers; + const llvm::Triple &TargetTriple; + + static Class merge(Class Accum, Class Field); + + void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; + + void classify(const Type *T, uint64_t OffsetBase, Class &Lo, Class &Hi, + bool IsNamedArg, bool IsRegCall = false) const; + + const Type *getIntegerTypeAtOffset(const Type *IRType, unsigned IROffset, + const Type *SourceTy, + unsigned SourceOffset, + bool InMemory = false) const; + + const Type *getSSETypeAtOffset(const Type *ABIType, unsigned ABIOffset, + const Type *SourceTy, + unsigned SourceOffset) const; + bool isIllegalVectorType(const Type *Ty) const; + bool containsMatrixField(const RecordType *RT) const; + + void computeInfo(ABIFunctionInfo &FI) const override; + ABIArgInfo getIndirectReturnResult(const Type *Ty) const; + const Type *getFPTypeAtOffset(const Type *Ty, unsigned Offset) const; + + const Type *isSingleElementStruct(const Type *Ty) const; + const Type *getByteVectorType(const Type *Ty) const; + + const Type *createPairType(const Type *Lo, const Type *Hi) const; + ABIArgInfo getIndirectResult(const Type *Ty, unsigned FreeIntRegs) const; + + ABIArgInfo classifyReturnType(const Type *RetTy) const; + const char *getClassName(Class C) const; + + ABIArgInfo classifyArgumentType(const Type *Ty, unsigned FreeIntRegs, + unsigned &NeededInt, unsigned &NeededSse, + bool IsNamedArg, + bool IsRegCall = false) const; + const Type *useFirstFieldIfTransparentUnion(const Type *Ty) const; + +public: + X86_64ABIInfo(TypeBuilder &TypeBuilder, const Triple &Triple, + X86AVXABILevel AVXABILevel, bool Has64BitPtrs, + const ABICompatInfo &Compat) + : ABIInfo(Compat), TB(TypeBuilder), AVXLevel(AVXABILevel), + Has64BitPointers(Has64BitPtrs), TargetTriple(Triple) {} + + bool has64BitPointers() const { return Has64BitPointers; } +}; + +// Gets the "best" type to represent the union. 
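+// A transparent union uses its first field; otherwise the field with the
+// strictest alignment wins, with the larger size breaking ties. For example
+// (illustrative): union { int i; double d; } reduces to the double field.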
+static const Type *reduceUnionForX8664(const RecordType *UnionType, + TypeBuilder &TB) { + assert(UnionType->isUnion() && "Expected union type"); + + ArrayRef<FieldInfo> Fields = UnionType->getFields(); + if (Fields.empty()) { + return nullptr; + } + + const Type *StorageType = nullptr; + + for (const auto &Field : Fields) { + if (Field.IsBitField && Field.IsUnnamedBitfield && + Field.BitFieldWidth == 0) { + continue; + } + + const Type *FieldType = Field.FieldType; + + if (UnionType->isTransparentUnion() && !StorageType) { + StorageType = FieldType; + break; + } + + if (!StorageType || + FieldType->getAlignment() > StorageType->getAlignment() || + (FieldType->getAlignment() == StorageType->getAlignment() && + TypeSize::isKnownGT(FieldType->getSizeInBits(), + StorageType->getSizeInBits()))) { + StorageType = FieldType; + } + } + return StorageType; +} + +void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, + Class &Hi) const { + // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: + // + // (a) If one of the classes is Memory, the whole argument is passed in + // memory. + // + // (b) If X87UP is not preceded by X87, the whole argument is passed in + // memory. + // + // (c) If the size of the aggregate exceeds two eightbytes and the first + // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole + // argument is passed in memory. NOTE: This is necessary to keep the + // ABI working for processors that don't support the __m256 type. + // + // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. + // + // Some of these are enforced by the merging logic. Others can arise + // only with unions; for example: + // union { _Complex double; unsigned; } + // + // Note that clauses (b) and (c) were added in 0.98. + + if (Hi == Memory) + Lo = Memory; + if (Hi == X87UP && Lo != X87 && getABICompatInfo().Flags.HonorsRevision98) + Lo = Memory; + if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) + Lo = Memory; + if (Hi == SSEUp && Lo != SSE) + Hi = SSE; +} +X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { + // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is + // classified recursively so that always two fields are + // considered. The resulting class is calculated according to + // the classes of the fields in the eightbyte: + // + // (a) If both classes are equal, this is the resulting class. + // + // (b) If one of the classes is NO_CLASS, the resulting class is + // the other class. + // + // (c) If one of the classes is MEMORY, the result is the MEMORY + // class. + // + // (d) If one of the classes is INTEGER, the result is the + // INTEGER. + // + // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, + // MEMORY is used as class. + // + // (f) Otherwise class SSE is used. + + // Accum should never be memory (we should have returned) or + // ComplexX87 (because this cannot be passed in a structure).
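+  // Worked example (illustrative): for struct { int a; float b; }, the two
+  // fields of the single eightbyte classify as Integer and SSE; rule (d)
+  // merges them to Integer, so the struct is passed in a GPR.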
+ assert((Accum != Memory && Accum != Complex_X87) && + "Invalid accumulated classification during merge."); + + if (Accum == Field || Field == NoClass) + return Accum; + if (Accum == NoClass) + return Field; + if (Field == Memory) + return Memory; + if (Accum == Integer || Field == Integer) + return Integer; + if (Field == X87 || Field == X87UP || Field == Complex_X87 || Accum == X87 || + Accum == X87UP) + return Memory; + + return SSE; +} + +bool X86_64ABIInfo::containsMatrixField(const RecordType *RT) const { + for (const auto &Field : RT->getFields()) { + const Type *FieldType = Field.FieldType; + + if (const auto *AT = dyn_cast<ArrayType>(FieldType)) + return AT->isMatrixType(); + + if (const auto *NestedRT = dyn_cast<RecordType>(FieldType)) + return containsMatrixField(NestedRT); + } + return false; +} + +void X86_64ABIInfo::classify(const Type *T, uint64_t OffsetBase, Class &Lo, + Class &Hi, bool IsNamedArg, bool IsRegCall) const { + Lo = Hi = NoClass; + Class &Current = OffsetBase < 64 ? Lo : Hi; + Current = Memory; + + if (T->isVoid()) { + Current = NoClass; + return; + } + + if (const auto *IT = dyn_cast<IntegerType>(T)) { + auto BitWidth = IT->getSizeInBits().getFixedValue(); + + if (BitWidth == 128 || + (IT->isBitInt() && BitWidth > 64 && BitWidth <= 128)) { + Lo = Integer; + Hi = Integer; + } else if (BitWidth <= 64) + Current = Integer; + + return; + } + + if (const auto *FT = dyn_cast<FloatType>(T)) { + const auto *FltSem = FT->getSemantics(); + + if (FltSem == &llvm::APFloat::IEEEsingle() || + FltSem == &llvm::APFloat::IEEEdouble() || + FltSem == &llvm::APFloat::IEEEhalf() || + FltSem == &llvm::APFloat::BFloat()) { + Current = SSE; + } else if (FltSem == &llvm::APFloat::IEEEquad()) { + Lo = SSE; + Hi = SSEUp; + } else if (FltSem == &llvm::APFloat::x87DoubleExtended()) { + Lo = X87; + Hi = X87UP; + } else + Current = SSE; + return; + } + if (T->isPointer()) { + Current = Integer; + return; + } + + if (const auto *MPT = dyn_cast<MemberPointerType>(T)) { + if (MPT->isFunctionPointer()) { + if (Has64BitPointers) { + Lo = Hi = Integer; + } else { + uint64_t EbFuncPtr = OffsetBase / 64; + uint64_t EbThisAdj = (OffsetBase + 64 - 1) / 64; + if (EbFuncPtr != EbThisAdj) { + Lo = Hi = Integer; + } else + Current = Integer; + } + } else + Current = Integer; + return; + } + + if (const auto *VT = dyn_cast<VectorType>(T)) { + auto Size = VT->getSizeInBits().getFixedValue(); + const Type *ElementType = VT->getElementType(); + + if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { + // gcc passes the following as integer: + // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float> + // 2 bytes - <2 x char>, <1 x short> + // 1 byte - <1 x char> + Current = Integer; + // If this type crosses an eightbyte boundary, it should be + // split. + uint64_t EbLo = (OffsetBase) / 64; + uint64_t EbHi = (OffsetBase + Size - 1) / 64; + if (EbLo != EbHi) + Hi = Lo; + } else if (Size == 64) { + if (const auto *FT = dyn_cast<FloatType>(ElementType)) { + // gcc passes <1 x double> in memory. :( + if (FT->getSemantics() == &llvm::APFloat::IEEEdouble()) + return; + } + + // gcc passes <1 x long long> as SSE but clang used to unconditionally + // pass them as integer. For platforms where clang is the de facto + // platform compiler, we must continue to use integer.
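+      // (ClassifyIntegerMMXAsSSE is one of the ABICompatInfo flags; when it
+      // is clear, <1 x i64> and <2 x i32> keep the old Integer
+      // classification below.)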
+ if (const auto *IT = dyn_cast<IntegerType>(ElementType)) { + uint64_t ElemBits = IT->getSizeInBits().getFixedValue(); + if (!getABICompatInfo().Flags.ClassifyIntegerMMXAsSSE && + (ElemBits == 64 || ElemBits == 32)) { + Current = Integer; + } else + Current = SSE; + } else + Current = SSE; + // If this type crosses an eightbyte boundary, it should be + // split. + if (OffsetBase && OffsetBase != 64) + Hi = Lo; + } else if (Size == 128 || + (IsNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { + if (const auto *IT = dyn_cast<IntegerType>(ElementType)) { + uint64_t ElemBits = IT->getSizeInBits().getFixedValue(); + // gcc passes 256 and 512 bit vectors in memory. :( + if (getABICompatInfo().Flags.PassInt128VectorsInMem && Size != 128 && + ElemBits == 128) + return; + } + + // Arguments of 256-bits are split into four eightbyte chunks. The + // least significant one belongs to class SSE and all the others to class + // SSEUP. The original Lo and Hi design considers that types can't be + // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. + // This design isn't correct for 256-bits, but since there're no cases + // where the upper parts would need to be inspected, avoid adding + // complexity and just consider Hi to match the 64-256 part. + // + // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in + // registers if they are "named", i.e. not part of the "..." of a + // variadic function. + // + // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are + // split into eight eightbyte chunks, one SSE and seven SSEUP. + Lo = SSE; + Hi = SSEUp; + } + return; + } + + if (const auto *CT = dyn_cast<ComplexType>(T)) { + const Type *ElementType = CT->getElementType(); + uint64_t Size = T->getSizeInBits().getFixedValue(); + + if (isa<IntegerType>(ElementType)) { + if (Size <= 64) + Current = Integer; + else if (Size <= 128) + Lo = Hi = Integer; + } else if (const auto *EFT = dyn_cast<FloatType>(ElementType)) { + const auto *FltSem = EFT->getSemantics(); + if (FltSem == &llvm::APFloat::IEEEhalf() || + FltSem == &llvm::APFloat::IEEEsingle() || + FltSem == &llvm::APFloat::BFloat()) + Current = SSE; + else if (FltSem == &llvm::APFloat::IEEEquad()) + Current = Memory; + else if (FltSem == &llvm::APFloat::x87DoubleExtended()) + Current = Complex_X87; + else if (FltSem == &llvm::APFloat::IEEEdouble()) + Lo = Hi = SSE; + else + llvm_unreachable("Unexpected long double representation!"); + } + + uint64_t ElementSize = ElementType->getSizeInBits().getFixedValue(); + // If this complex type crosses an eightbyte boundary then it + // should be split.
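+    // For example (illustrative): a _Complex float at bit offset 32 puts its
+    // real part in the first eightbyte and its imaginary part in the second,
+    // so Hi inherits Lo's class.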
+ uint64_t EbReal = OffsetBase / 64; + uint64_t EbImag = (OffsetBase + ElementSize) / 64; + if (Hi == NoClass && EbReal != EbImag) + Hi = Lo; + + return; + } + + if (const auto *AT = dyn_cast<ArrayType>(T)) { + uint64_t Size = AT->getSizeInBits().getFixedValue(); + + if (!IsRegCall && Size > 512) + return; + + const Type *ElementType = AT->getElementType(); + uint64_t ElemAlign = ElementType->getAlignment().value() * 8; + if (OffsetBase % ElemAlign) + return; + + Current = NoClass; + uint64_t EltSize = ElementType->getSizeInBits().getFixedValue(); + uint64_t ArraySize = AT->getNumElements(); + + if (Size > 128 && + (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel))) + return; + + for (uint64_t I = 0, Offset = OffsetBase; I < ArraySize; + ++I, Offset += EltSize) { + Class FieldLo, FieldHi; + classify(ElementType, Offset, FieldLo, FieldHi, IsNamedArg); + Lo = merge(Lo, FieldLo); + Hi = merge(Hi, FieldHi); + if (Lo == Memory || Hi == Memory) + break; + } + postMerge(Size, Lo, Hi); + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); + return; + } + if (const auto *RT = dyn_cast<RecordType>(T)) { + uint64_t Size = RT->getSizeInBits().getFixedValue(); + + if (containsMatrixField(RT)) { + Lo = Memory; + return; + } + + // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger + // than eight eightbytes, ..., it has class MEMORY. + if (Size > 512) + return; + + // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial + // copy constructor or a non-trivial destructor, it is passed by invisible + // reference. + if (getRecordArgABI(RT, RT->isCXXRecord())) + return; + + // Assume variable sized types are passed in memory. + if (RT->hasFlexibleArrayMember()) + return; + + // Reset Lo class, this will be recomputed. + Current = NoClass; + + // If this is a C++ record, classify the bases first. + if (RT->isCXXRecord()) { + for (const auto &Base : RT->getBaseClasses()) { + + // Classify this field. + // + // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a + // single eightbyte, each is classified separately. Each eightbyte gets + // initialized to class NO_CLASS. + Class FieldLo, FieldHi; + uint64_t Offset = OffsetBase + Base.OffsetInBits; + classify(Base.FieldType, Offset, FieldLo, FieldHi, IsNamedArg); + Lo = merge(Lo, FieldLo); + Hi = merge(Hi, FieldHi); + + if (getABICompatInfo().Flags.ReturnCXXRecordGreaterThan128InMem && + (Size > 128 && + (Size != Base.FieldType->getSizeInBits().getFixedValue() || + Size > getNativeVectorSizeForAVXABI(AVXLevel)))) { + Lo = Memory; + postMerge(Size, Lo, Hi); + return; + } + + if (Lo == Memory || Hi == Memory) { + postMerge(Size, Lo, Hi); + return; + } + } + } + + // Classify the fields one at a time, merging the results.
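+    // e.g. (illustrative) struct { long a; double b; }: 'a' marks the first
+    // eightbyte Integer and 'b' marks the second SSE, so the struct is
+    // passed in one GPR and one XMM register.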
+ + bool IsUnion = RT->isUnion() && !getABICompatInfo().Flags.Clang11Compat; + for (const auto &Field : RT->getFields()) { + uint64_t Offset = OffsetBase + Field.OffsetInBits; + bool BitField = Field.IsBitField; + + if (BitField && Field.IsUnnamedBitfield) + continue; + + if (Size > 128 && + ((!IsUnion && + Size != Field.FieldType->getSizeInBits().getFixedValue()) || + Size > getNativeVectorSizeForAVXABI(AVXLevel))) { + Lo = Memory; + postMerge(Size, Lo, Hi); + return; + } + + bool IsInMemory = Offset % (Field.FieldType->getAlignment().value() * 8); + if (!BitField && IsInMemory) { + Lo = Memory; + postMerge(Size, Lo, Hi); + return; + } + + Class FieldLo, FieldHi; + + if (BitField) { + uint64_t BitFieldSize = Field.BitFieldWidth; + uint64_t EbLo = Offset / 64; + uint64_t EbHi = (Offset + BitFieldSize - 1) / 64; + + if (EbLo) { + assert(EbHi == EbLo && "Invalid classification, type > 16 bytes."); + FieldLo = NoClass; + FieldHi = Integer; + } else { + FieldLo = Integer; + FieldHi = EbHi ? Integer : NoClass; + } + } else + classify(Field.FieldType, Offset, FieldLo, FieldHi, IsNamedArg); + + Lo = merge(Lo, FieldLo); + Hi = merge(Hi, FieldHi); + if (Lo == Memory || Hi == Memory) + break; + } + postMerge(Size, Lo, Hi); + return; + } + + Lo = Memory; + Hi = NoClass; +} + +const Type * +X86_64ABIInfo::useFirstFieldIfTransparentUnion(const Type *Ty) const { + if (const auto *RT = dyn_cast<RecordType>(Ty)) { + if (RT->isUnion() && RT->isTransparentUnion()) { + auto Fields = RT->getFields(); + assert(!Fields.empty() && "sema created an empty transparent union"); + return Fields.front().FieldType; + } + } + return Ty; +} + +ABIArgInfo +X86_64ABIInfo::classifyArgumentType(const Type *Ty, unsigned FreeIntRegs, + unsigned &NeededInt, unsigned &NeededSSE, + bool IsNamedArg, bool IsRegCall) const { + + Ty = useFirstFieldIfTransparentUnion(Ty); + + X86_64ABIInfo::Class Lo, Hi; + classify(Ty, 0, Lo, Hi, IsNamedArg, IsRegCall); + + // Check some invariants + assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); + + NeededInt = 0; + NeededSSE = 0; + const Type *ResType = nullptr; + + switch (Lo) { + case NoClass: + if (Hi == NoClass) + return ABIArgInfo::getIgnore(); + // If the low part is just padding, it takes no register, leave ResType + // null. + assert((Hi == SSE || Hi == Integer || Hi == X87UP) && + "Unknown missing lo part"); + break; + + // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument + // on the stack. + case Memory: + // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or + // COMPLEX_X87, it is passed in memory. + case X87: + case Complex_X87: + if (getRecordArgABI(dyn_cast<RecordType>(Ty)) == RAA_Indirect) + ++NeededInt; + return getIndirectResult(Ty, FreeIntRegs); + + case SSEUp: + case X87UP: + llvm_unreachable("Invalid classification for lo word."); + + // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next + // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 + // and %r9 is used. + case Integer: + ++NeededInt; + + // Pick an 8-byte type based on the preferred type. + ResType = getIntegerTypeAtOffset(Ty, 0, Ty, 0); + + // If we have a sign or zero extended integer, make sure to return Extend + // so that the parameter gets the right LLVM IR attributes.
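+    // (For instance, a 'short' argument is promotable and is emitted as an
+    // i16 parameter carrying the signext attribute.)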
+ if (Hi == NoClass && ResType->isInteger()) { + if (Ty->isInteger() && isPromotableInteger(cast<IntegerType>(Ty))) + return ABIArgInfo::getExtend(Ty); + } + + if (ResType->isInteger() && ResType->getSizeInBits() == 128) { + assert(Hi == Integer); + ++NeededInt; + return ABIArgInfo::getDirect(ResType); + } + break; + + // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next + // available SSE register is used, the registers are taken in the + // order from %xmm0 to %xmm7. + case SSE: + ResType = getSSETypeAtOffset(Ty, 0, Ty, 0); + ++NeededSSE; + break; + } + + const Type *HighPart = nullptr; + switch (Hi) { + // Memory was handled previously, Complex_X87 and X87 should + // never occur as hi classes, and X87UP must be preceded by X87, + // which is passed in memory. + case Memory: + case X87: + case Complex_X87: + llvm_unreachable("Invalid classification for hi word."); + + case NoClass: + break; + + case Integer: + ++NeededInt; + // Pick an 8-byte type based on the preferred type. + HighPart = getIntegerTypeAtOffset(Ty, 8, Ty, 8); + + if (Lo == NoClass) // Pass HighPart at offset 8 in memory. + return ABIArgInfo::getDirect(HighPart, 8); + break; + + // X87UP generally doesn't occur here (long double is passed in + // memory), except in situations involving unions. + case X87UP: + case SSE: + ++NeededSSE; + HighPart = getSSETypeAtOffset(Ty, 8, Ty, 8); + + if (Lo == NoClass) // Pass HighPart at offset 8 in memory. + return ABIArgInfo::getDirect(HighPart, 8); + break; + + // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the + // eightbyte is passed in the upper half of the last used SSE + // register. This only happens when 128-bit vectors are passed. + case SSEUp: + assert(Lo == SSE && "Unexpected SSEUp classification"); + ResType = getByteVectorType(Ty); + break; + } + + // If a high part was specified, merge it together with the low part. It is + // known to pass in the high eightbyte of the result. We do this by forming a + // first class struct aggregate with the high and low part: {low, high} + if (HighPart) + ResType = createPairType(ResType, HighPart); + + return ABIArgInfo::getDirect(ResType); +} + +ABIArgInfo X86_64ABIInfo::classifyReturnType(const Type *RetTy) const { + // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the + // classification algorithm. + + X86_64ABIInfo::Class Lo, Hi; + classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); + + // Check some invariants + assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); + + const Type *ResType = nullptr; + switch (Lo) { + case NoClass: + if (Hi == NoClass) + return ABIArgInfo::getIgnore(); + // If the low part is just padding, it takes no register, leave ResType + // null. + assert((Hi == SSE || Hi == Integer || Hi == X87UP) && + "Unknown missing lo part"); + break; + case SSEUp: + case X87UP: + llvm_unreachable("Invalid classification for lo word."); + + // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via + // hidden argument. + case Memory: + return getIndirectReturnResult(RetTy); + + // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next + // available register of the sequence %rax, %rdx is used. + case Integer: + ResType = getIntegerTypeAtOffset(RetTy, 0, RetTy, 0); + // If we have a sign or zero extended integer, make sure to return Extend + // so that the parameter gets the right LLVM IR attributes.
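+  // (Return values narrower than 32 bits follow the same promotion rule as
+  // arguments; see isPromotableInteger() in ABIInfo.cpp.)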
+ if (Hi == NoClass && ResType->isInteger()) { + if (const IntegerType *IntTy = dyn_cast<IntegerType>(RetTy)) { + if (isPromotableInteger(IntTy)) { + ABIArgInfo Info = ABIArgInfo::getExtend(RetTy); + return Info; + } + } + } + if (ResType->isInteger() && ResType->getSizeInBits() == 128) { + assert(Hi == Integer); + return ABIArgInfo::getDirect(ResType); + } + break; + + // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next + // available SSE register of the sequence %xmm0, %xmm1 is used. + case SSE: + ResType = getSSETypeAtOffset(RetTy, 0, RetTy, 0); + break; + + // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is + // returned on the X87 stack in %st0 as 80-bit x87 number. + case X87: + ResType = TB.getFloatType(APFloat::x87DoubleExtended(), Align(16)); + break; + + // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real + // part of the value is returned in %st0 and the imaginary part in + // %st1. + case Complex_X87: + assert(Hi == Complex_X87 && "Unexpected ComplexX87 classification."); + { + const Type *X87Type = + TB.getFloatType(APFloat::x87DoubleExtended(), Align(16)); + FieldInfo Fields[] = {FieldInfo(X87Type, 0), FieldInfo(X87Type, 128)}; + ResType = + TB.getCoercedRecordType(Fields, TypeSize::getFixed(256), Align(16)); + } + break; + } + + const Type *HighPart = nullptr; + switch (Hi) { + // Memory was handled previously and X87 should + // never occur as a hi class. + case Memory: + case X87: + llvm_unreachable("Invalid classification for hi word."); + + case Complex_X87: + case NoClass: + break; + + case Integer: + HighPart = getIntegerTypeAtOffset(RetTy, 8, RetTy, 8); + if (Lo == NoClass) + return ABIArgInfo::getDirect(HighPart, 8); + break; + + case SSE: + HighPart = getSSETypeAtOffset(RetTy, 8, RetTy, 8); + if (Lo == NoClass) + return ABIArgInfo::getDirect(HighPart, 8); + break; + + // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte + // is passed in the next available eightbyte chunk of the last used + // vector register. + // + // SSEUP should always be preceded by SSE, just widen. + case SSEUp: + assert(Lo == SSE && "Unexpected SSEUp classification."); + ResType = getByteVectorType(RetTy); + break; + + // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is + // returned together with the previous X87 value in %st0. + case X87UP: + // If X87Up is preceded by X87, we don't need to do + // anything. However, in some cases with unions it may not be + // preceded by X87. In such situations we follow gcc and pass the + // extra bits in an SSE reg. + if (Lo != X87) { + HighPart = getSSETypeAtOffset(RetTy, 8, RetTy, 8); + if (Lo == NoClass) // Return HighPart at offset 8 in memory. + return ABIArgInfo::getDirect(HighPart, 8); + } + break; + } + + // If a high part was specified, merge it together with the low part. It is + // known to pass in the high eightbyte of the result. We do this by forming a + // first class struct aggregate with the high and low part: {low, high} + if (HighPart) + ResType = createPairType(ResType, HighPart); + + return ABIArgInfo::getDirect(ResType); +} + +/// Given a high and low type that can ideally +/// be used as elements of a two register pair to pass or return, return a +/// first class aggregate to represent them. For example, if the low part of +/// a by-value argument should be passed as i32* and the high part as float, +/// return {i32*, float}.
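+/// When the inferred low part is narrower than 8 bytes (half/float/bfloat,
+/// i8/i16/i32, or a 32-bit pointer), the body below widens it so that the
+/// high part starts exactly at offset 8.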
+const Type *X86_64ABIInfo::createPairType(const Type *Lo,
+                                          const Type *Hi) const {
+  // In order to correctly satisfy the ABI, we need the high part to start
+  // at offset 8. If the high and low parts we inferred are both 4-byte types
+  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
+  // the second element at offset 8. Check for this:
+  unsigned LoSize = (unsigned)Lo->getTypeAllocSize();
+  Align HiAlign = Hi->getAlignment();
+  unsigned HiStart = alignTo(LoSize, HiAlign);
+
+  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
+
+  // To handle this, we have to increase the size of the low part so that the
+  // second element will start at an 8 byte offset. We can't increase the size
+  // of the second element because it might make us access off the end of the
+  // struct.
+  const Type *AdjustedLo = Lo;
+  if (HiStart != 8) {
+    // There are usually two sorts of types the ABI generation code can produce
+    // for the low part of a pair that aren't 8 bytes in size: half, float or
+    // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
+    // NaCl).
+    // Promote these to a larger type.
+    if (Lo->isFloat()) {
+      const FloatType *FT = cast<FloatType>(Lo);
+      if (FT->getSemantics() == &APFloat::IEEEhalf() ||
+          FT->getSemantics() == &APFloat::IEEEsingle() ||
+          FT->getSemantics() == &APFloat::BFloat())
+        AdjustedLo = TB.getFloatType(APFloat::IEEEdouble(), Align(8));
+    }
+    // Promote integers and pointers to i64
+    else if (Lo->isInteger() || Lo->isPointer())
+      AdjustedLo = TB.getIntegerType(64, Align(8), /*Signed=*/false);
+    else
+      llvm_unreachable("Invalid/unknown low type in pair");
+    unsigned AdjustedLoSize = AdjustedLo->getSizeInBits().getFixedValue() / 8;
+    HiStart = alignTo(AdjustedLoSize, HiAlign);
+  }
+
+  // Create the pair struct
+  FieldInfo Fields[] = {FieldInfo(AdjustedLo, 0), FieldInfo(Hi, HiStart * 8)};
+
+  // Verify the high part is at offset 8
+  assert((8 * 8) == Fields[1].OffsetInBits &&
+         "High part must be at offset 8 bytes");
+
+  return TB.getCoercedRecordType(Fields, TypeSize::getFixed(128), Align(8),
+                                 StructPacking::Default);
+}
+
+static bool bitsContainNoUserData(const Type *Ty, unsigned StartBit,
+                                  unsigned EndBit) {
+  // If the range is completely beyond the type's size, it's definitely padding
+  unsigned TySize = Ty->getSizeInBits().getFixedValue();
+  if (TySize <= StartBit)
+    return true;
+
+  // Handle arrays - check each element
+  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+    const Type *EltTy = AT->getElementType();
+    unsigned EltSize = EltTy->getSizeInBits().getFixedValue();
+
+    for (unsigned I = 0; I < AT->getNumElements(); ++I) {
+      unsigned EltOffset = I * EltSize;
+      if (EltOffset >= EndBit)
+        break;
+
+      unsigned EltStart = (EltOffset < StartBit) ? StartBit - EltOffset : 0;
+      if (!bitsContainNoUserData(EltTy, EltStart, EndBit - EltOffset))
+        return false;
+    }
+    return true;
+  }
+
+  // Handle records - check all fields and base classes
+  if (const RecordType *RT = dyn_cast<RecordType>(Ty)) {
+    if (RT->isUnion()) {
+      for (const auto &Field : RT->getFields()) {
+        if (Field.IsUnnamedBitfield)
+          continue;
+
+        unsigned FieldStart =
+            (Field.OffsetInBits < StartBit) ? StartBit - Field.OffsetInBits : 0;
+        unsigned FieldEnd =
+            FieldStart + Field.FieldType->getSizeInBits().getFixedValue();
+
+        // Check if the field overlaps with the queried range
+        if (FieldStart < EndBit && FieldEnd > StartBit) {
+          // There's an overlap, so there may be user data
+          unsigned RelativeStart =
+              (StartBit > FieldStart) ? StartBit - FieldStart : 0;
+          unsigned RelativeEnd =
+              (EndBit < FieldEnd)
+                  ? EndBit - FieldStart
+                  : Field.FieldType->getSizeInBits().getFixedValue();
+
+          if (!bitsContainNoUserData(Field.FieldType, RelativeStart,
+                                     RelativeEnd))
+            return false;
+        }
+      }
+      return true;
+    }
+
+    // Check base classes first (for C++ records)
+    if (RT->isCXXRecord()) {
+      for (unsigned I = 0; I < RT->getNumBaseClasses(); ++I) {
+        const FieldInfo &Base = RT->getBaseClasses()[I];
+        if (Base.OffsetInBits >= EndBit)
+          continue;
+
+        unsigned BaseStart =
+            (Base.OffsetInBits < StartBit) ? StartBit - Base.OffsetInBits : 0;
+        if (!bitsContainNoUserData(Base.FieldType, BaseStart,
+                                   EndBit - Base.OffsetInBits))
+          return false;
+      }
+    }
+
+    for (unsigned I = 0; I < RT->getNumFields(); ++I) {
+      const FieldInfo &Field = RT->getFields()[I];
+      if (Field.OffsetInBits >= EndBit)
+        break;
+
+      unsigned FieldStart =
+          (Field.OffsetInBits < StartBit) ? StartBit - Field.OffsetInBits : 0;
+      if (!bitsContainNoUserData(Field.FieldType, FieldStart,
+                                 EndBit - Field.OffsetInBits))
+        return false;
+    }
+    return true;
+  }
+
+  // For vectors and primitives, assume all bits are user data.
+  return false;
+}
+
+const Type *X86_64ABIInfo::getIntegerTypeAtOffset(const Type *ABIType,
+                                                  unsigned ABIOffset,
+                                                  const Type *SourceTy,
+                                                  unsigned SourceOffset,
+                                                  bool InMemory) const {
+  const Type *WorkingType = ABIType;
+  if (InMemory && ABIType->isInteger()) {
+    const auto *IT = cast<IntegerType>(ABIType);
+    unsigned OriginalBitWidth = IT->getSizeInBits().getFixedValue();
+
+    unsigned WidenedBitWidth = OriginalBitWidth;
+    if (OriginalBitWidth <= 8) {
+      WidenedBitWidth = 8;
+    } else {
+      WidenedBitWidth = llvm::bit_ceil(OriginalBitWidth);
+    }
+
+    if (WidenedBitWidth != OriginalBitWidth) {
+      WorkingType = TB.getIntegerType(WidenedBitWidth, ABIType->getAlignment(),
+                                      IT->isSigned());
+    }
+  }
+
+  // If we're dealing with an un-offset ABI type, then it means that we're
+  // returning an 8-byte unit starting with it. See if we can safely use it.
+  if (ABIOffset == 0) {
+    // Pointers and int64's always fill the 8-byte unit.
+    if ((WorkingType->isPointer() && Has64BitPointers) ||
+        (WorkingType->isInteger() &&
+         cast<IntegerType>(WorkingType)->getSizeInBits() == 64))
+      return ABIType;
+
+    // If we have a 1/8/16/32-bit integer, we can use it only if the rest of
+    // the goodness in the source type is just tail padding. This is allowed to
+    // kick in for struct {double,int} on the int, but not on
+    // struct{double,int,int} because we wouldn't return the second int. We
+    // have to do this analysis on the source type because we can't depend on
+    // unions being lowered a specific way etc.
+    if ((WorkingType->isInteger() &&
+         (cast<IntegerType>(WorkingType)->getSizeInBits() == 1 ||
+          cast<IntegerType>(WorkingType)->getSizeInBits() == 8 ||
+          cast<IntegerType>(WorkingType)->getSizeInBits() == 16 ||
+          cast<IntegerType>(WorkingType)->getSizeInBits() == 32)) ||
+        (WorkingType->isPointer() && !Has64BitPointers)) {
+      unsigned BitWidth = WorkingType->isPointer()
+                              ? 32
+                              : cast<IntegerType>(WorkingType)->getSizeInBits();
+
+      if (bitsContainNoUserData(SourceTy, SourceOffset * 8 + BitWidth,
+                                SourceOffset * 8 + 64))
+        return WorkingType;
+    }
+  }
+
+  if (const auto *RTy = dyn_cast<RecordType>(ABIType)) {
+    if (RTy->isUnion()) {
+      const Type *ReducedType = reduceUnionForX8664(RTy, TB);
+      if (ReducedType)
+        return getIntegerTypeAtOffset(ReducedType, ABIOffset, SourceTy,
+                                      SourceOffset, true);
+    }
+    if (const FieldInfo *Element =
+            RTy->getElementContainingOffset(ABIOffset * 8)) {
+      unsigned ElementOffsetBytes = Element->OffsetInBits / 8;
+      return getIntegerTypeAtOffset(Element->FieldType,
+                                    ABIOffset - ElementOffsetBytes, SourceTy,
+                                    SourceOffset, true);
+    }
+  }
+
+  if (const auto *ATy = dyn_cast<ArrayType>(ABIType)) {
+    const Type *EltTy = ATy->getElementType();
+    unsigned EltSize = EltTy->getSizeInBits().getFixedValue() / 8;
+    if (EltSize > 0) {
+      unsigned EltOffset = (ABIOffset / EltSize) * EltSize;
+      return getIntegerTypeAtOffset(EltTy, ABIOffset - EltOffset, SourceTy,
+                                    SourceOffset, true);
+    }
+  }
+
+  // If we have a 128-bit integer, we can pass it safely as an i128, so we
+  // return it unchanged.
+  if (ABIType->isInteger() && ABIType->getSizeInBits() == 128) {
+    assert(ABIOffset == 0);
+    return ABIType;
+  }
+
+  unsigned TySizeInBytes =
+      llvm::divideCeil(SourceTy->getSizeInBits().getFixedValue(), 8);
+  if (auto *IT = dyn_cast<IntegerType>(SourceTy)) {
+    if (IT->isBitInt())
+      TySizeInBytes =
+          alignTo(SourceTy->getSizeInBits().getFixedValue(), 64) / 8;
+  }
+  assert(TySizeInBytes != SourceOffset && "Empty field?");
+  unsigned AvailableSize = TySizeInBytes - SourceOffset;
+  return TB.getIntegerType(std::min(AvailableSize, 8U) * 8, Align(1), false);
+}
+
+/// Returns the floating point type at the specified offset within a type, or
+/// nullptr if no floating point type is found at that offset.
+const Type *X86_64ABIInfo::getFPTypeAtOffset(const Type *Ty,
+                                             unsigned Offset) const {
+  // Check for a direct match at offset 0.
+  if (Offset == 0 && Ty->isFloat())
+    return Ty;
+
+  if (const ComplexType *CT = dyn_cast<ComplexType>(Ty)) {
+    const Type *ElementType = CT->getElementType();
+    unsigned ElementSize = ElementType->getSizeInBits().getFixedValue() / 8;
+
+    if (Offset == 0 || Offset == ElementSize)
+      return ElementType;
+    return nullptr;
+  }
+
+  // Handle record types by checking each field
+  if (const RecordType *RT = dyn_cast<RecordType>(Ty)) {
+    if (const FieldInfo *Element = RT->getElementContainingOffset(Offset * 8)) {
+      unsigned ElementOffsetBytes = Element->OffsetInBits / 8;
+      return getFPTypeAtOffset(Element->FieldType, Offset - ElementOffsetBytes);
+    }
+  }
+
+  // Handle array types
+  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+    const Type *EltTy = AT->getElementType();
+    unsigned EltSize = EltTy->getSizeInBits().getFixedValue() / 8;
+    unsigned EltIndex = Offset / EltSize;
+    return getFPTypeAtOffset(EltTy, Offset - (EltIndex * EltSize));
+  }
+
+  // No floating point type found at this offset.
+  return nullptr;
+}
+
+/// Helper to check if a floating point type matches specific semantics
+static bool isFloatTypeWithSemantics(const Type *Ty,
+                                     const fltSemantics &Semantics) {
+  if (!Ty->isFloat())
+    return false;
+  const FloatType *FT = cast<FloatType>(Ty);
+  return FT->getSemantics() == &Semantics;
+}
+
+/// getSSETypeAtOffset - Return a type that will be passed by the backend in
+/// the low 8 bytes of an XMM register, corresponding to the SSE class.
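+///
+/// For example, 'struct { float a, b; }' has IEEE floats at offsets 0 and 4,
+/// so the eightbyte is passed as a <2 x float> vector, while a lone double
+/// is passed as a double.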
+const Type *X86_64ABIInfo::getSSETypeAtOffset(const Type *ABIType,
+                                              unsigned ABIOffset,
+                                              const Type *SourceTy,
+                                              unsigned SourceOffset) const {
+  if (const auto *RTy = dyn_cast<RecordType>(ABIType)) {
+    if (RTy->isUnion()) {
+      const Type *ReducedType = reduceUnionForX8664(RTy, TB);
+      if (ReducedType)
+        return getSSETypeAtOffset(ReducedType, ABIOffset, SourceTy,
+                                  SourceOffset);
+    }
+  }
+
+  auto Is16bitFpTy = [](const Type *T) {
+    return isFloatTypeWithSemantics(T, APFloat::IEEEhalf()) ||
+           isFloatTypeWithSemantics(T, APFloat::BFloat());
+  };
+
+  // Get the floating point type at the requested offset.
+  const Type *T0 = getFPTypeAtOffset(ABIType, ABIOffset);
+  if (!T0 || isFloatTypeWithSemantics(T0, APFloat::IEEEdouble()))
+    return TB.getFloatType(APFloat::IEEEdouble(), Align(8));
+
+  // Calculate the remaining source size in bytes.
+  unsigned SourceSize =
+      (SourceTy->getSizeInBits().getFixedValue() / 8) - SourceOffset;
+
+  // Try to get the adjacent FP type.
+  const Type *T1 = nullptr;
+  unsigned T0Size = alignTo(T0->getSizeInBits().getFixedValue() / 8,
+                            T0->getAlignment().value());
+  if (SourceSize > T0Size)
+    T1 = getFPTypeAtOffset(ABIType, ABIOffset + T0Size);
+
+  if (T1 == nullptr) {
+    if (Is16bitFpTy(T0) && SourceSize > 4)
+      T1 = getFPTypeAtOffset(ABIType, ABIOffset + 4);
+
+    if (T1 == nullptr)
+      return T0;
+  }
+
+  // Handle vector cases
+  if (isFloatTypeWithSemantics(T0, APFloat::IEEEsingle()) &&
+      isFloatTypeWithSemantics(T1, APFloat::IEEEsingle()))
+    return TB.getVectorType(T0, ElementCount::getFixed(2), Align(8));
+
+  if (Is16bitFpTy(T0) && Is16bitFpTy(T1)) {
+    const Type *T2 = nullptr;
+    if (SourceSize > 4)
+      T2 = getFPTypeAtOffset(ABIType, ABIOffset + 4);
+    if (!T2)
+      return TB.getVectorType(T0, ElementCount::getFixed(2), Align(8));
+    return TB.getVectorType(T0, ElementCount::getFixed(4), Align(8));
+  }
+
+  // Mixed half-float cases
+  if (Is16bitFpTy(T0) || Is16bitFpTy(T1))
+    return TB.getVectorType(TB.getFloatType(APFloat::IEEEhalf(), Align(2)),
+                            ElementCount::getFixed(4), Align(8));
+
+  // Default to double
+  return TB.getFloatType(APFloat::IEEEdouble(), Align(8));
+}
+
+/// The ABI specifies that a value should be passed in a full vector XMM/YMM
+/// register. Pick an LLVM IR type that will be passed as a vector register.
+const Type *X86_64ABIInfo::getByteVectorType(const Type *Ty) const {
+  // Wrapper structs/arrays that only contain vectors are passed just like
+  // vectors; strip them off if present.
+  if (const Type *InnerTy = isSingleElementStruct(Ty))
+    Ty = InnerTy;
+
+  // Handle vector types
+  if (const VectorType *VT = dyn_cast<VectorType>(Ty)) {
+    // Don't pass vXi128 vectors in their native type, the backend can't
+    // legalize them.
+    if (getABICompatInfo().Flags.PassInt128VectorsInMem &&
+        VT->getElementType()->isInteger() &&
+        cast<IntegerType>(VT->getElementType())->getSizeInBits() == 128) {
+      unsigned Size = VT->getSizeInBits().getFixedValue();
+      return TB.getVectorType(TB.getIntegerType(64, Align(8), /*Signed=*/false),
+                              ElementCount::getFixed(Size / 64),
+                              Align(Size / 8));
+    }
+    return VT;
+  }
+
+  // Handle fp128
+  if (isFloatTypeWithSemantics(Ty, APFloat::IEEEquad()))
+    return Ty;
+
+  // We couldn't find the preferred IR vector type for 'Ty'.
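+  // Fall back to a vector of doubles covering the full size; the backend only
+  // needs the value to occupy the right number of vector registers.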
+  unsigned Size = Ty->getSizeInBits().getFixedValue();
+  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid vector size");
+
+  return TB.getVectorType(TB.getFloatType(APFloat::IEEEdouble(), Align(8)),
+                          ElementCount::getFixed(Size / 64), Align(Size / 8));
+}
+
+// Returns the single element if this is a single-element struct wrapper
+const Type *X86_64ABIInfo::isSingleElementStruct(const Type *Ty) const {
+  const auto *RT = dyn_cast<RecordType>(Ty);
+  if (!RT)
+    return nullptr;
+
+  if (RT->isPolymorphic() || RT->hasNonTrivialCopyConstructor() ||
+      RT->hasNonTrivialDestructor() || RT->hasFlexibleArrayMember() ||
+      RT->getNumVirtualBaseClasses() != 0)
+    return nullptr;
+
+  const Type *Found = nullptr;
+
+  for (const auto &Base : RT->getBaseClasses()) {
+    const Type *BaseTy = Base.FieldType;
+    auto *BaseRT = dyn_cast<RecordType>(BaseTy);
+
+    if (!BaseRT || isEmptyRecord(BaseRT))
+      continue;
+
+    const Type *Elem = isSingleElementStruct(BaseTy);
+    if (!Elem || Found)
+      return nullptr;
+    Found = Elem;
+  }
+
+  for (const auto &FI : RT->getFields()) {
+    if (isEmptyField(FI))
+      continue;
+
+    const Type *FTy = FI.FieldType;
+
+    while (auto *AT = dyn_cast<ArrayType>(FTy)) {
+      if (AT->getNumElements() != 1)
+        break;
+      FTy = AT->getElementType();
+    }
+
+    const Type *Elem;
+    if (auto *InnerRT = dyn_cast<RecordType>(FTy))
+      Elem = isSingleElementStruct(InnerRT);
+    else
+      Elem = FTy;
+    if (!Elem || Found)
+      return nullptr;
+    Found = Elem;
+  }
+
+  if (!Found)
+    return nullptr;
+  if (Found->getSizeInBits() != Ty->getSizeInBits())
+    return nullptr;
+
+  return Found;
+}
+
+bool X86_64ABIInfo::isIllegalVectorType(const Type *Ty) const {
+  if (const auto *VecTy = dyn_cast<VectorType>(Ty)) {
+    uint64_t Size = VecTy->getSizeInBits().getFixedValue();
+    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
+
+    // Vectors <= 64 bits or > largest supported vector size are illegal
+    if (Size <= 64 || Size > LargestVector)
+      return true;
+
+    // Check for 128-bit integer element vectors that should be passed in memory
+    const Type *EltTy = VecTy->getElementType();
+    if (getABICompatInfo().Flags.PassInt128VectorsInMem && EltTy->isInteger()) {
+      const auto *IntTy = cast<IntegerType>(EltTy);
+      if (IntTy->getSizeInBits().getFixedValue() == 128)
+        return true;
+    }
+  }
+  return false;
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(const Type *Ty,
+                                            unsigned FreeIntRegs) const {
+  // If this is a scalar LLVM value then assume LLVM will pass it in the right
+  // place naturally.
+  //
+  // This assumption is optimistic, as there could be free registers available
+  // when we need to pass this argument in memory, and LLVM could try to pass
+  // the argument in the free register. This does not seem to happen currently,
+  // but this code would be much safer if we could mark the argument with
+  // 'onstack'. See PR12193.
+  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty) &&
+      !(Ty->isInteger() && cast<IntegerType>(Ty)->isBitInt())) {
+    return (Ty->isInteger() && isPromotableInteger(cast<IntegerType>(Ty)))
+               ? ABIArgInfo::getExtend(Ty)
+               : ABIArgInfo::getDirect();
+  }
+
+  // Check if this is a record type that needs special handling
+  if (auto RecordRAA = getRecordArgABI(Ty))
+    return getNaturalAlignIndirect(Ty,
+                                   RecordRAA == RecordArgABI::RAA_DirectInMemory);
+
+  // Compute the byval alignment. We specify the alignment of the byval in all
+  // cases so that the mid-level optimizer knows the alignment of the byval.
+  unsigned Align =
+      std::max(static_cast<unsigned>(Ty->getAlignment().value()), 8U);
+
+  // Attempt to avoid passing indirect results using byval when possible. This
+  // is important for good codegen.
+  //
+  // We do this by coercing the value into a scalar type which the backend can
+  // handle naturally (i.e., without using byval).
+  //
+  // For simplicity, we currently only do this when we have exhausted all of the
+  // free integer registers. Doing this when there are free integer registers
+  // would require more care, as we would have to ensure that the coerced value
+  // did not claim the unused register. That would require either reordering the
+  // arguments to the function (so that any subsequent inreg values came first),
+  // or only doing this optimization when there were no following arguments that
+  // might be inreg.
+  //
+  // We currently expect it to be rare (particularly in well written code) for
+  // arguments to be passed on the stack when there are still free integer
+  // registers available (this would typically imply large structs being passed
+  // by value), so this seems like a fair tradeoff for now.
+  //
+  // We can revisit this if the backend grows support for 'onstack' parameter
+  // attributes. See PR12193.
+  if (FreeIntRegs == 0) {
+    uint64_t Size = Ty->getSizeInBits().getFixedValue();
+
+    // If this type fits in an eightbyte, coerce it into the matching integral
+    // type, which will end up on the stack (with alignment 8).
+    if (Align == 8 && Size <= 64) {
+      const Type *IntTy =
+          TB.getIntegerType(Size, llvm::Align(8), /*Signed=*/false);
+      return ABIArgInfo::getDirect(IntTy);
+    }
+  }
+
+  return ABIArgInfo::getIndirect(Align);
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(const Type *Ty) const {
+  // If this is a scalar value, handle it specially.
+  if (!isAggregateTypeForABI(Ty)) {
+    // Handle integer types that need extension.
+    if (Ty->isInteger()) {
+      const IntegerType *IntTy = cast<IntegerType>(Ty);
+      if (isPromotableInteger(IntTy))
+        return ABIArgInfo::getExtend(Ty);
+      if (IntTy->isBitInt())
+        return getNaturalAlignIndirect(IntTy);
+    }
+    return ABIArgInfo::getDirect();
+  }
+
+  // For aggregate types or other cases, return as indirect.
+  return getNaturalAlignIndirect(Ty);
+}
+
+static bool classifyCXXReturnType(ABIFunctionInfo &FI, const ABIInfo &Info) {
+  const abi::Type *Ty = FI.getReturnType();
+
+  if (const auto *RT = llvm::dyn_cast<RecordType>(Ty)) {
+    if (!RT->isCXXRecord() && !RT->canPassInRegisters()) {
+      ABIArgInfo IndirectInfo =
+          ABIArgInfo::getIndirect(RT->getAlignment().value());
+      FI.getReturnInfo() = IndirectInfo;
+      return true;
+    }
+    if (!RT->canPassInRegisters()) {
+      ABIArgInfo IndirectInfo =
+          ABIArgInfo::getIndirect(Ty->getAlignment().value(), false, 0, false);
+      FI.getReturnInfo() = IndirectInfo;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void X86_64ABIInfo::computeInfo(ABIFunctionInfo &FI) const {
+  CallingConv::ID CallingConv = FI.getCallingConvention();
+
+  if (CallingConv == CallingConv::Win64 ||
+      CallingConv == CallingConv::X86_RegCall)
+    return;
+
+  bool IsRegCall = false;
+
+  unsigned FreeIntRegs = 6;
+  unsigned FreeSSERegs = 8;
+  unsigned NeededInt = 0, NeededSSE = 0;
+
+  if (!classifyCXXReturnType(FI, *this)) {
+    const Type *RetTy = FI.getReturnType();
+    ABIArgInfo RetInfo = classifyReturnType(RetTy);
+    FI.getReturnInfo() = RetInfo;
+  }
+
+  if (FI.getReturnInfo().isIndirect())
+    --FreeIntRegs;
+
+  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
+
+  unsigned ArgNo = 0;
+  for (auto IT = FI.arg_begin(), IE = FI.arg_end(); IT != IE; ++IT, ++ArgNo) {
+    bool IsNamedArg = ArgNo < NumRequiredArgs;
+    const Type *ArgTy = IT->ABIType;
+    NeededInt = 0;
+    NeededSSE = 0;
+
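+    // Classify the argument; NeededInt and NeededSSE report how many integer
+    // and SSE registers it would consume if passed in registers.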
+    ABIArgInfo ArgInfo = classifyArgumentType(ArgTy, FreeIntRegs, NeededInt,
+                                              NeededSSE, IsNamedArg, IsRegCall);
+
+    // AMD64-ABI 3.2.3p3: If there are no registers available for any
+    // eightbyte of an argument, the whole argument is passed on the
+    // stack. If registers have already been assigned for some
+    // eightbytes of such an argument, the assignments get reverted.
+    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
+      FreeIntRegs -= NeededInt;
+      FreeSSERegs -= NeededSSE;
+      IT->ArgInfo = ArgInfo;
+    } else {
+      // Not enough registers, pass on the stack.
+      ABIArgInfo IndirectInfo = getIndirectResult(ArgTy, FreeIntRegs);
+      IT->ArgInfo = IndirectInfo;
+    }
+  }
+}
+
+class X8664TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  X8664TargetCodeGenInfo(TypeBuilder &TB, const Triple &Triple,
+                         X86AVXABILevel AVXLevel, bool Has64BitPointers,
+                         const ABICompatInfo &Compat)
+      : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(
+            TB, Triple, AVXLevel, Has64BitPointers, Compat)) {}
+};
+
+std::unique_ptr<TargetCodeGenInfo>
+createX8664TargetCodeGenInfo(TypeBuilder &TB, const Triple &Triple,
+                             X86AVXABILevel AVXLevel, bool Has64BitPointers,
+                             const ABICompatInfo &Compat) {
+  return std::make_unique<X8664TargetCodeGenInfo>(TB, Triple, AVXLevel,
+                                                  Has64BitPointers, Compat);
+}
+} // namespace abi
+} // namespace llvm
diff --git a/llvm/lib/CMakeLists.txt b/llvm/lib/CMakeLists.txt
index a9432977718c6..c2ce8fd6eb686 100644
--- a/llvm/lib/CMakeLists.txt
+++ b/llvm/lib/CMakeLists.txt
@@ -3,6 +3,7 @@ include(LLVM-Build)
 # `Demangle', `Support' and `TableGen' libraries are added on the top-level
 # CMakeLists.txt
+add_subdirectory(ABI)
 add_subdirectory(IR)
 add_subdirectory(FuzzMutate)
 add_subdirectory(FileCheck)