diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 0fac1b211239a..281592e6ed26b 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -37,6 +37,8 @@ struct MissingFeatures { static bool opGlobalDLLImportExport() { return false; } static bool opGlobalPartition() { return false; } static bool opGlobalUsedOrCompilerUsed() { return false; } + static bool setDSOLocal() { return false; } + static bool setComdat() { return false; } static bool supportIFuncAttr() { return false; } static bool supportVisibility() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 6a1746a7ad0ac..58345b45c97bc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -89,6 +89,11 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return cir::ConstRecordAttr::get(sTy, arrayAttr); } + cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) { + cir::ConstRecordAttr anonRecord = getAnonConstRecord(fieldsAttr); + return cir::TypeInfoAttr::get(anonRecord.getType(), fieldsAttr); + } + std::string getUniqueAnonRecordName() { return getUniqueRecordName("anon"); } std::string getUniqueRecordName(const std::string &baseName) { diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index ae922599809b8..1dee77425c30d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -114,6 +114,9 @@ class CIRGenCXXABI { virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0; + virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, + QualType ty) = 0; + /// Get the type of the implicit "this" parameter used by a method. May return /// zero if no specific type is applicable, e.g. if the ABI expects the "this" /// parameter to point to some artificial offset in a complete object due to diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 0bf6cf556787c..debea8af66b50 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -103,6 +103,9 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { const CXXRecordDecl *rd) override; void emitVirtualInheritanceTables(const CXXRecordDecl *rd) override; + mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, + QualType ty) override; + bool doStructorsInitializeVPtrs(const CXXRecordDecl *vtableClass) override { return true; } @@ -111,6 +114,34 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &cgf, Address thisAddr, const CXXRecordDecl *classDecl, const CXXRecordDecl *baseClassDecl) override; + + /**************************** RTTI Uniqueness ******************************/ +protected: + /// Returns true if the ABI requires RTTI type_info objects to be unique + /// across a program. + virtual bool shouldRTTIBeUnique() const { return true; } + +public: + /// What sort of unique-RTTI behavior should we use? + enum RTTIUniquenessKind { + /// We are guaranteeing, or need to guarantee, that the RTTI string + /// is unique. + RUK_Unique, + + /// We are not guaranteeing uniqueness for the RTTI string, so we + /// can demote to hidden visibility but must use string comparisons. 
+    RUK_NonUniqueHidden,
+
+    /// We are not guaranteeing uniqueness for the RTTI string, so we
+    /// have to use string comparisons, but we also have to emit it with
+    /// non-hidden visibility.
+    RUK_NonUniqueVisible
+  };
+
+  /// Return the required visibility status for the given type and linkage in
+  /// the current ABI.
+  RTTIUniquenessKind
+  classifyRTTIUniqueness(QualType canTy, cir::GlobalLinkageKind linkage) const;
 };
 } // namespace
 
@@ -424,6 +455,1038 @@ void CIRGenItaniumCXXABI::emitVirtualInheritanceTables(
   vtables.emitVTTDefinition(vtt, cgm.getVTableLinkage(rd), rd);
 }
 
+namespace {
+class CIRGenItaniumRTTIBuilder {
+  CIRGenModule &cgm;                 // Per-module state.
+  const CIRGenItaniumCXXABI &cxxABI; // Per-module state.
+
+  /// The fields of the RTTI descriptor currently being built.
+  SmallVector<mlir::Attribute, 16> fields;
+
+  // Returns the mangled type name of the given type.
+  cir::GlobalOp getAddrOfTypeName(mlir::Location loc, QualType ty,
+                                  cir::GlobalLinkageKind linkage);
+
+  /// Returns the constant for the RTTI descriptor of the given type.
+  mlir::Attribute getAddrOfExternalRTTIDescriptor(mlir::Location loc,
+                                                  QualType ty);
+
+  /// Build the vtable pointer for the given type.
+  void buildVTablePointer(mlir::Location loc, const Type *ty);
+
+  /// Build an abi::__si_class_type_info, used for single inheritance,
+  /// according to the Itanium C++ ABI, 2.9.5p6b.
+  void buildSIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *rd);
+
+  /// Build an abi::__vmi_class_type_info, used for
+  /// classes with bases that do not satisfy the abi::__si_class_type_info
+  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+  void buildVMIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *rd);
+
+public:
+  CIRGenItaniumRTTIBuilder(const CIRGenItaniumCXXABI &abi, CIRGenModule &cgm)
+      : cgm(cgm), cxxABI(abi) {}
+
+  /// Build the RTTI type info struct for the given type, or
+  /// link to an existing RTTI descriptor if one already exists.
+  mlir::Attribute buildTypeInfo(mlir::Location loc, QualType ty);
+
+  /// Build the RTTI type info struct for the given type.
+  mlir::Attribute buildTypeInfo(mlir::Location loc, QualType ty,
+                                cir::GlobalLinkageKind linkage,
+                                mlir::SymbolTable::Visibility visibility);
+};
+} // namespace
+
+// TODO(cir): Will be removed after sharing them with the classical codegen
+namespace {
+
+// Pointer type info flags.
+enum {
+  /// PTI_Const - Type has const qualifier.
+  PTI_Const = 0x1,
+
+  /// PTI_Volatile - Type has volatile qualifier.
+  PTI_Volatile = 0x2,
+
+  /// PTI_Restrict - Type has restrict qualifier.
+  PTI_Restrict = 0x4,
+
+  /// PTI_Incomplete - Type is incomplete.
+  PTI_Incomplete = 0x8,
+
+  /// PTI_ContainingClassIncomplete - Containing class is incomplete.
+  /// (in pointer to member).
+  PTI_ContainingClassIncomplete = 0x10,
+
+  /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
+  // PTI_TransactionSafe = 0x20,
+
+  /// PTI_Noexcept - Pointee is noexcept function (C++1z).
+  PTI_Noexcept = 0x40,
+};
+
+// VMI type info flags.
+enum {
+  /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
+  VMI_NonDiamondRepeat = 0x1,
+
+  /// VMI_DiamondShaped - Class is diamond shaped.
+  VMI_DiamondShaped = 0x2
+};
+
+// Base class type info flags.
+enum {
+  /// BCTI_Virtual - Base class is virtual.
+  BCTI_Virtual = 0x1,
+
+  /// BCTI_Public - Base class is public.
+  BCTI_Public = 0x2
+};
+
+/// Given a builtin type, returns whether the type
+/// info for that type is defined in the standard library.
+/// TODO(cir): this can unified with LLVM codegen +static bool typeInfoIsInStandardLibrary(const BuiltinType *ty) { + // Itanium C++ ABI 2.9.2: + // Basic type information (e.g. for "int", "bool", etc.) will be kept in + // the run-time support library. Specifically, the run-time support + // library should contain type_info objects for the types X, X* and + // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char, + // unsigned char, signed char, short, unsigned short, int, unsigned int, + // long, unsigned long, long long, unsigned long long, float, double, + // long double, char16_t, char32_t, and the IEEE 754r decimal and + // half-precision floating point types. + // + // GCC also emits RTTI for __int128. + // FIXME: We do not emit RTTI information for decimal types here. + + // Types added here must also be added to emitFundamentalRTTIDescriptors. + switch (ty->getKind()) { + case BuiltinType::WasmExternRef: + case BuiltinType::HLSLResource: + llvm_unreachable("NYI"); + case BuiltinType::Void: + case BuiltinType::NullPtr: + case BuiltinType::Bool: + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + case BuiltinType::Char_U: + case BuiltinType::Char_S: + case BuiltinType::UChar: + case BuiltinType::SChar: + case BuiltinType::Short: + case BuiltinType::UShort: + case BuiltinType::Int: + case BuiltinType::UInt: + case BuiltinType::Long: + case BuiltinType::ULong: + case BuiltinType::LongLong: + case BuiltinType::ULongLong: + case BuiltinType::Half: + case BuiltinType::Float: + case BuiltinType::Double: + case BuiltinType::LongDouble: + case BuiltinType::Float16: + case BuiltinType::Float128: + case BuiltinType::Ibm128: + case BuiltinType::Char8: + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::Int128: + case BuiltinType::UInt128: + return true; + +#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + case BuiltinType::Id: +#include "clang/Basic/OpenCLImageTypes.def" +#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) case BuiltinType::Id: +#include "clang/Basic/OpenCLExtensionTypes.def" + case BuiltinType::OCLSampler: + case BuiltinType::OCLEvent: + case BuiltinType::OCLClkEvent: + case BuiltinType::OCLQueue: + case BuiltinType::OCLReserveID: +#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/AArch64ACLETypes.def" +#define PPC_VECTOR_TYPE(Name, Id, Size) case BuiltinType::Id: +#include "clang/Basic/PPCTypes.def" +#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/RISCVVTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id: +#include "clang/Basic/AMDGPUTypes.def" + case BuiltinType::ShortAccum: + case BuiltinType::Accum: + case BuiltinType::LongAccum: + case BuiltinType::UShortAccum: + case BuiltinType::UAccum: + case BuiltinType::ULongAccum: + case BuiltinType::ShortFract: + case BuiltinType::Fract: + case BuiltinType::LongFract: + case BuiltinType::UShortFract: + case BuiltinType::UFract: + case BuiltinType::ULongFract: + case BuiltinType::SatShortAccum: + case BuiltinType::SatAccum: + case BuiltinType::SatLongAccum: + case BuiltinType::SatUShortAccum: + case BuiltinType::SatUAccum: + case BuiltinType::SatULongAccum: + case BuiltinType::SatShortFract: + case BuiltinType::SatFract: + case BuiltinType::SatLongFract: + case BuiltinType::SatUShortFract: + case BuiltinType::SatUFract: + case BuiltinType::SatULongFract: + case BuiltinType::BFloat16: + return false; + + case BuiltinType::Dependent: +#define BUILTIN_TYPE(Id, SingletonId) 
+#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+    llvm_unreachable("asking for RTTI for a placeholder type!");
+
+  case BuiltinType::ObjCId:
+  case BuiltinType::ObjCClass:
+  case BuiltinType::ObjCSel:
+    llvm_unreachable("FIXME: Objective-C types are unsupported!");
+  }
+
+  llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+static bool typeInfoIsInStandardLibrary(const PointerType *pointerTy) {
+  QualType pointeeTy = pointerTy->getPointeeType();
+  const auto *builtinTy = dyn_cast<BuiltinType>(pointeeTy);
+  if (!builtinTy)
+    return false;
+
+  // Check the qualifiers.
+  Qualifiers quals = pointeeTy.getQualifiers();
+  quals.removeConst();
+
+  if (!quals.empty())
+    return false;
+
+  return typeInfoIsInStandardLibrary(builtinTy);
+}
+
+/// IsStandardLibraryRTTIDescriptor - Returns whether the type
+/// information for the given type exists in the standard library.
+static bool isStandardLibraryRttiDescriptor(QualType ty) {
+  // Type info for builtin types is defined in the standard library.
+  if (const auto *builtinTy = dyn_cast<BuiltinType>(ty))
+    return typeInfoIsInStandardLibrary(builtinTy);
+
+  // Type info for some pointer types to builtin types is defined in the
+  // standard library.
+  if (const auto *pointerTy = dyn_cast<PointerType>(ty))
+    return typeInfoIsInStandardLibrary(pointerTy);
+
+  return false;
+}
+
+/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
+/// the given type exists somewhere else, and that we should not emit the type
+/// information in this translation unit. Assumes that it is not a
+/// standard-library type.
+static bool shouldUseExternalRttiDescriptor(CIRGenModule &cgm, QualType ty) {
+  ASTContext &context = cgm.getASTContext();
+
+  // If RTTI is disabled, assume it might be disabled in the
+  // translation unit that defines any potential key function, too.
+  if (!context.getLangOpts().RTTI)
+    return false;
+
+  if (const auto *recordTy = dyn_cast<RecordType>(ty)) {
+    const CXXRecordDecl *rd =
+        cast<CXXRecordDecl>(recordTy->getOriginalDecl())->getDefinitionOrSelf();
+    if (!rd->hasDefinition())
+      return false;
+
+    if (!rd->isDynamicClass())
+      return false;
+
+    // FIXME: this may need to be reconsidered if the key function
+    // changes.
+    // N.B. We must always emit the RTTI data ourselves if there exists a key
+    // function.
+    bool isDLLImport = rd->hasAttr<DLLImportAttr>();
+
+    // Don't import the RTTI but emit it locally.
+    if (cgm.getTriple().isOSCygMing())
+      return false;
+
+    if (cgm.getVTables().isVTableExternal(rd)) {
+      if (cgm.getTarget().hasPS4DLLImportExport())
+        return true;
+
+      return !isDLLImport || cgm.getTriple().isWindowsItaniumEnvironment();
+    }
+
+    if (isDLLImport)
+      return true;
+  }
+
+  return false;
+}
+
+/// Contains virtual and non-virtual bases seen when traversing a class
+/// hierarchy.
+struct SeenBases {
+  llvm::SmallPtrSet<const CXXRecordDecl *, 8> nonVirtualBases;
+  llvm::SmallPtrSet<const CXXRecordDecl *, 8> virtualBases;
+};
+
+/// Compute the value of the flags member in abi::__vmi_class_type_info.
+///
+static unsigned computeVmiClassTypeInfoFlags(const CXXBaseSpecifier *base,
+                                             SeenBases &bases) {
+
+  unsigned flags = 0;
+  auto *baseDecl = base->getType()->castAsCXXRecordDecl();
+
+  if (base->isVirtual()) {
+    // Mark the virtual base as seen.
+    if (!bases.virtualBases.insert(baseDecl).second) {
+      // If this virtual base has been seen before, then the class is diamond
+      // shaped.
+      flags |= VMI_DiamondShaped;
+    } else {
+      if (bases.nonVirtualBases.count(baseDecl))
+        flags |= VMI_NonDiamondRepeat;
+    }
+  } else {
+    // Mark the non-virtual base as seen.
+    if (!bases.nonVirtualBases.insert(baseDecl).second) {
+      // If this non-virtual base has been seen before, then the class has non-
+      // diamond shaped repeated inheritance.
+      flags |= VMI_NonDiamondRepeat;
+    } else {
+      if (bases.virtualBases.count(baseDecl))
+        flags |= VMI_NonDiamondRepeat;
+    }
+  }
+
+  // Walk all bases.
+  for (const auto &bs : baseDecl->bases())
+    flags |= computeVmiClassTypeInfoFlags(&bs, bases);
+
+  return flags;
+}
+
+static unsigned computeVmiClassTypeInfoFlags(const CXXRecordDecl *rd) {
+  unsigned flags = 0;
+  SeenBases bases;
+
+  // Walk all bases.
+  for (const auto &bs : rd->bases())
+    flags |= computeVmiClassTypeInfoFlags(&bs, bases);
+
+  return flags;
+}
+
+// Return whether the given record decl has a "single,
+// public, non-virtual base at offset zero (i.e. the derived class is dynamic
+// iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b.
+// TODO(cir): this can be unified with LLVM codegen
+static bool canUseSingleInheritance(const CXXRecordDecl *rd) {
+  // Check the number of bases.
+  if (rd->getNumBases() != 1)
+    return false;
+
+  // Get the base.
+  CXXRecordDecl::base_class_const_iterator base = rd->bases_begin();
+
+  // Check that the base is not virtual.
+  if (base->isVirtual())
+    return false;
+
+  // Check that the base is public.
+  if (base->getAccessSpecifier() != AS_public)
+    return false;
+
+  // Check that the class is dynamic iff the base is.
+  auto *baseDecl = base->getType()->castAsCXXRecordDecl();
+  return baseDecl->isEmpty() ||
+         baseDecl->isDynamicClass() == rd->isDynamicClass();
+}
+
+/// IsIncompleteClassType - Returns whether the given record type is incomplete.
+static bool isIncompleteClassType(const RecordType *recordTy) {
+  return !recordTy->getOriginalDecl()
+              ->getDefinitionOrSelf()
+              ->isCompleteDefinition();
+}
+
+/// Returns whether the given type contains an
+/// incomplete class type. This is true if
+///
+///   * The given type is an incomplete class type.
+///   * The given type is a pointer type whose pointee type contains an
+///     incomplete class type.
+///   * The given type is a member pointer type whose class is an incomplete
+///     class type.
+///   * The given type is a member pointer type whose pointee type contains an
+///     incomplete class type.
+static bool containsIncompleteClassType(QualType ty) {
+  if (const auto *recordTy = dyn_cast<RecordType>(ty)) {
+    if (isIncompleteClassType(recordTy))
+      return true;
+  }
+
+  if (const auto *pointerTy = dyn_cast<PointerType>(ty))
+    return containsIncompleteClassType(pointerTy->getPointeeType());
+
+  if (const auto *memberPointerTy = dyn_cast<MemberPointerType>(ty)) {
+    // Check if the class type is incomplete.
+    if (!memberPointerTy->getMostRecentCXXRecordDecl()->hasDefinition())
+      return true;
+
+    return containsIncompleteClassType(memberPointerTy->getPointeeType());
+  }
+
+  return false;
+}
+
+const char *vTableClassNameForType(const CIRGenModule &cgm, const Type *ty) {
+  // abi::__class_type_info.
+  static const char *const classTypeInfo =
+      "_ZTVN10__cxxabiv117__class_type_infoE";
+  // abi::__si_class_type_info.
+  static const char *const siClassTypeInfo =
+      "_ZTVN10__cxxabiv120__si_class_type_infoE";
+  // abi::__vmi_class_type_info.
+ static const char *const vmiClassTypeInfo = + "_ZTVN10__cxxabiv121__vmi_class_type_infoE"; + + switch (ty->getTypeClass()) { +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical and dependent types shouldn't get here"); + + case Type::LValueReference: + case Type::RValueReference: + llvm_unreachable("References shouldn't get here"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("Undeduced type shouldn't get here"); + + case Type::Pipe: + llvm_unreachable("Pipe types shouldn't get here"); + + case Type::ArrayParameter: + llvm_unreachable("Array Parameter types should not get here."); + + case Type::Builtin: + case Type::BitInt: + // GCC treats vector and complex types as fundamental types. + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::Complex: + case Type::Atomic: + // FIXME: GCC treats block pointers as fundamental types?! + case Type::BlockPointer: + cgm.errorNYI("VTableClassNameForType: __fundamental_type_info"); + break; + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + cgm.errorNYI("VTableClassNameForType: __array_type_info"); + break; + + case Type::FunctionNoProto: + case Type::FunctionProto: + cgm.errorNYI("VTableClassNameForType: __function_type_info"); + break; + + case Type::Enum: + cgm.errorNYI("VTableClassNameForType: Enum"); + break; + + case Type::Record: { + const CXXRecordDecl *rd = + cast(cast(ty)->getOriginalDecl()) + ->getDefinitionOrSelf(); + + if (!rd->hasDefinition() || !rd->getNumBases()) { + return classTypeInfo; + } + + if (canUseSingleInheritance(rd)) { + return siClassTypeInfo; + } + + return vmiClassTypeInfo; + } + + case Type::ObjCObject: + cgm.errorNYI("VTableClassNameForType: ObjCObject"); + break; + + case Type::ObjCInterface: + cgm.errorNYI("VTableClassNameForType: ObjCInterface"); + break; + + case Type::ObjCObjectPointer: + case Type::Pointer: + cgm.errorNYI("VTableClassNameForType: __pointer_type_info"); + break; + + case Type::MemberPointer: + cgm.errorNYI("VTableClassNameForType: __pointer_to_member_type_info"); + break; + + case Type::HLSLAttributedResource: + case Type::HLSLInlineSpirv: + llvm_unreachable("HLSL doesn't support virtual functions"); + } + + return nullptr; +} +} // namespace + +/// Return the linkage that the type info and type info name constants +/// should have for the given type. +static cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &cgm, + QualType ty) { + // In addition, it and all of the intermediate abi::__pointer_type_info + // structs in the chain down to the abi::__class_type_info for the + // incomplete class type must be prevented from resolving to the + // corresponding type_info structs for the complete class type, possibly + // by making them local static objects. Finally, a dummy class RTTI is + // generated for the incomplete type that will not resolve to the final + // complete class RTTI (because the latter need not exist), possibly by + // making it a local static object. 
+ if (containsIncompleteClassType(ty)) + return cir::GlobalLinkageKind::InternalLinkage; + + switch (ty->getLinkage()) { + case Linkage::Invalid: + llvm_unreachable("Linkage hasn't been computed!"); + + case Linkage::None: + case Linkage::Internal: + case Linkage::UniqueExternal: + return cir::GlobalLinkageKind::InternalLinkage; + + case Linkage::VisibleNone: + case Linkage::Module: + case Linkage::External: + // RTTI is not enabled, which means that this type info struct is going + // to be used for exception handling. Give it linkonce_odr linkage. + if (!cgm.getLangOpts().RTTI) + return cir::GlobalLinkageKind::LinkOnceODRLinkage; + + if (const RecordType *record = dyn_cast(ty)) { + const CXXRecordDecl *rd = + cast(record->getOriginalDecl())->getDefinitionOrSelf(); + if (rd->hasAttr()) + return cir::GlobalLinkageKind::WeakODRLinkage; + + if (cgm.getTriple().isWindowsItaniumEnvironment()) + if (rd->hasAttr() && + shouldUseExternalRttiDescriptor(cgm, ty)) + return cir::GlobalLinkageKind::ExternalLinkage; + + // MinGW always uses LinkOnceODRLinkage for type info. + if (rd->isDynamicClass() && !cgm.getASTContext() + .getTargetInfo() + .getTriple() + .isWindowsGNUEnvironment()) + return cgm.getVTableLinkage(rd); + } + + return cir::GlobalLinkageKind::LinkOnceODRLinkage; + } + + llvm_unreachable("Invalid linkage!"); +} + +cir::GlobalOp +CIRGenItaniumRTTIBuilder::getAddrOfTypeName(mlir::Location loc, QualType ty, + cir::GlobalLinkageKind linkage) { + CIRGenBuilderTy &builder = cgm.getBuilder(); + SmallString<256> name; + llvm::raw_svector_ostream out(name); + cgm.getCXXABI().getMangleContext().mangleCXXRTTIName(ty, out); + + // We know that the mangled name of the type starts at index 4 of the + // mangled name of the typename, so we can just index into it in order to + // get the mangled name of the type. + mlir::Attribute init = builder.getString( + name.substr(4), cgm.convertType(cgm.getASTContext().CharTy), + std::nullopt); + + CharUnits align = + cgm.getASTContext().getTypeAlignInChars(cgm.getASTContext().CharTy); + + // builder.getString can return a #cir.zero if the string given to it only + // contains null bytes. However, type names cannot be full of null bytes. + // So cast Init to a ConstArrayAttr should be safe. + auto initStr = cast(init); + + cir::GlobalOp gv = cgm.createOrReplaceCXXRuntimeVariable( + loc, name, initStr.getType(), linkage, align); + CIRGenModule::setInitializer(gv, init); + return gv; +} + +mlir::Attribute +CIRGenItaniumRTTIBuilder::getAddrOfExternalRTTIDescriptor(mlir::Location loc, + QualType ty) { + // Mangle the RTTI name. + SmallString<256> name; + llvm::raw_svector_ostream out(name); + cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, out); + CIRGenBuilderTy &builder = cgm.getBuilder(); + + // Look for an existing global. + cir::GlobalOp gv = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), name)); + + if (!gv) { + // Create a new global variable. + // From LLVM codegen => Note for the future: If we would ever like to do + // deferred emission of RTTI, check if emitting vtables opportunistically + // need any adjustment. + gv = CIRGenModule::createGlobalOp(cgm, loc, name, builder.getUInt8PtrTy(), + /*isConstant=*/true); + const CXXRecordDecl *rd = ty->getAsCXXRecordDecl(); + cgm.setGVProperties(gv, rd); + + // Import the typeinfo symbol when all non-inline virtual methods are + // imported. 
+ if (cgm.getTarget().hasPS4DLLImportExport()) { + cgm.errorNYI("getAddrOfExternalRTTIDescriptor: hasPS4DLLImportExport"); + } + } + + return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), gv); +} + +void CIRGenItaniumRTTIBuilder::buildVTablePointer(mlir::Location loc, + const Type *ty) { + CIRGenBuilderTy &builder = cgm.getBuilder(); + const char *vTableName = vTableClassNameForType(cgm, ty); + + // Check if the alias exists. If it doesn't, then get or create the global. + if (cgm.getItaniumVTableContext().isRelativeLayout()) { + cgm.errorNYI("buildVTablePointer: isRelativeLayout"); + return; + } + + mlir::Type vtableGlobalTy = builder.getPointerTo(builder.getUInt8PtrTy()); + llvm::Align align = cgm.getDataLayout().getABITypeAlign(vtableGlobalTy); + cir::GlobalOp vTable = cgm.createOrReplaceCXXRuntimeVariable( + loc, vTableName, vtableGlobalTy, cir::GlobalLinkageKind::ExternalLinkage, + CharUnits::fromQuantity(align)); + + // The vtable address point is 2. + mlir::Attribute field{}; + if (cgm.getItaniumVTableContext().isRelativeLayout()) { + cgm.errorNYI("buildVTablePointer: isRelativeLayout"); + } else { + SmallVector offsets{ + cgm.getBuilder().getI32IntegerAttr(2)}; + auto indices = mlir::ArrayAttr::get(builder.getContext(), offsets); + field = cgm.getBuilder().getGlobalViewAttr(cgm.getBuilder().getUInt8PtrTy(), + vTable, indices); + } + + assert(field && "expected attribute"); + fields.push_back(field); +} + +/// Build an abi::__si_class_type_info, used for single inheritance, according +/// to the Itanium C++ ABI, 2.95p6b. +void CIRGenItaniumRTTIBuilder::buildSIClassTypeInfo(mlir::Location loc, + const CXXRecordDecl *rd) { + // Itanium C++ ABI 2.9.5p6b: + // It adds to abi::__class_type_info a single member pointing to the + // type_info structure for the base type, + mlir::Attribute baseTypeInfo = + CIRGenItaniumRTTIBuilder(cxxABI, cgm) + .buildTypeInfo(loc, rd->bases_begin()->getType()); + fields.push_back(baseTypeInfo); +} + +/// Build an abi::__vmi_class_type_info, used for +/// classes with bases that do not satisfy the abi::__si_class_type_info +/// constraints, according to the Itanium C++ ABI, 2.9.5p5c. +void CIRGenItaniumRTTIBuilder::buildVMIClassTypeInfo(mlir::Location loc, + const CXXRecordDecl *rd) { + mlir::Type unsignedIntLTy = + cgm.convertType(cgm.getASTContext().UnsignedIntTy); + + // Itanium C++ ABI 2.9.5p6c: + // __flags is a word with flags describing details about the class + // structure, which may be referenced by using the __flags_masks + // enumeration. These flags refer to both direct and indirect bases. + unsigned flags = computeVmiClassTypeInfoFlags(rd); + fields.push_back(cir::IntAttr::get(unsignedIntLTy, flags)); + + // Itanium C++ ABI 2.9.5p6c: + // __base_count is a word with the number of direct proper base class + // descriptions that follow. + fields.push_back(cir::IntAttr::get(unsignedIntLTy, rd->getNumBases())); + + if (!rd->getNumBases()) + return; + + // Now add the base class descriptions. + + // Itanium C++ ABI 2.9.5p6c: + // __base_info[] is an array of base class descriptions -- one for every + // direct proper base. Each description is of the type: + // + // struct abi::__base_class_type_info { + // public: + // const __class_type_info *__base_type; + // long __offset_flags; + // + // enum __offset_flags_masks { + // __virtual_mask = 0x1, + // __public_mask = 0x2, + // __offset_shift = 8 + // }; + // }; + + // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long + // long' instead of 'long' for __offset_flags. 
libstdc++abi uses long long on + // LLP64 platforms. + // FIXME: Consider updating libc++abi to match, and extend this logic to all + // LLP64 platforms. + QualType offsetFlagsTy = cgm.getASTContext().LongTy; + const TargetInfo &ti = cgm.getASTContext().getTargetInfo(); + if (ti.getTriple().isOSCygMing() && + ti.getPointerWidth(LangAS::Default) > ti.getLongWidth()) + offsetFlagsTy = cgm.getASTContext().LongLongTy; + mlir::Type offsetFlagsLTy = cgm.convertType(offsetFlagsTy); + + for (const CXXBaseSpecifier &base : rd->bases()) { + // The __base_type member points to the RTTI for the base type. + fields.push_back(CIRGenItaniumRTTIBuilder(cxxABI, cgm) + .buildTypeInfo(loc, base.getType())); + + CXXRecordDecl *baseDecl = base.getType()->castAsCXXRecordDecl(); + int64_t offsetFlags = 0; + + // All but the lower 8 bits of __offset_flags are a signed offset. + // For a non-virtual base, this is the offset in the object of the base + // subobject. For a virtual base, this is the offset in the virtual table of + // the virtual base offset for the virtual base referenced (negative). + CharUnits offset; + if (base.isVirtual()) + offset = cgm.getItaniumVTableContext().getVirtualBaseOffsetOffset( + rd, baseDecl); + else { + const ASTRecordLayout &layout = + cgm.getASTContext().getASTRecordLayout(rd); + offset = layout.getBaseClassOffset(baseDecl); + } + offsetFlags = uint64_t(offset.getQuantity()) << 8; + + // The low-order byte of __offset_flags contains flags, as given by the + // masks from the enumeration __offset_flags_masks. + if (base.isVirtual()) + offsetFlags |= BCTI_Virtual; + if (base.getAccessSpecifier() == AS_public) + offsetFlags |= BCTI_Public; + + fields.push_back(cir::IntAttr::get(offsetFlagsLTy, offsetFlags)); + } +} + +mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo(mlir::Location loc, + QualType ty) { + // We want to operate on the canonical type. + ty = ty.getCanonicalType(); + + // Check if we've already emitted an RTTI descriptor for this type. + SmallString<256> name; + llvm::raw_svector_ostream out(name); + cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, out); + + auto oldGV = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), name)); + + if (oldGV && !oldGV.isDeclaration()) { + assert(!oldGV.hasAvailableExternallyLinkage() && + "available_externally typeinfos not yet implemented"); + return cgm.getBuilder().getGlobalViewAttr(cgm.getBuilder().getUInt8PtrTy(), + oldGV); + } + + // Check if there is already an external RTTI descriptor for this type. + if (isStandardLibraryRttiDescriptor(ty) || + shouldUseExternalRttiDescriptor(cgm, ty)) + return getAddrOfExternalRTTIDescriptor(loc, ty); + + // Emit the standard library with external linkage. + cir::GlobalLinkageKind linkage = getTypeInfoLinkage(cgm, ty); + + // Give the type_info object and name the formal visibility of the + // type itself. + assert(!cir::MissingFeatures::hiddenVisibility()); + assert(!cir::MissingFeatures::protectedVisibility()); + + mlir::SymbolTable::Visibility symVisibility; + if (cir::isLocalLinkage(linkage)) + // If the linkage is local, only default visibility makes sense. 
+ symVisibility = mlir::SymbolTable::Visibility::Public; + else if (cxxABI.classifyRTTIUniqueness(ty, linkage) == + CIRGenItaniumCXXABI::RUK_NonUniqueHidden) { + cgm.errorNYI( + "buildTypeInfo: classifyRTTIUniqueness == RUK_NonUniqueHidden"); + symVisibility = CIRGenModule::getMLIRVisibility(ty->getVisibility()); + } else + symVisibility = CIRGenModule::getMLIRVisibility(ty->getVisibility()); + + return buildTypeInfo(loc, ty, linkage, symVisibility); +} + +mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo( + mlir::Location loc, QualType ty, cir::GlobalLinkageKind linkage, + mlir::SymbolTable::Visibility visibility) { + CIRGenBuilderTy &builder = cgm.getBuilder(); + + assert(!cir::MissingFeatures::setDLLStorageClass()); + + // Add the vtable pointer. + buildVTablePointer(loc, cast(ty)); + + // And the name. + cir::GlobalOp typeName = getAddrOfTypeName(loc, ty, linkage); + mlir::Attribute typeNameField; + + // If we're supposed to demote the visibility, be sure to set a flag + // to use a string comparison for type_info comparisons. + CIRGenItaniumCXXABI::RTTIUniquenessKind rttiUniqueness = + cxxABI.classifyRTTIUniqueness(ty, linkage); + if (rttiUniqueness != CIRGenItaniumCXXABI::RUK_Unique) { + // The flag is the sign bit, which on ARM64 is defined to be clear + // for global pointers. This is very ARM64-specific. + cgm.errorNYI( + "buildTypeInfo: rttiUniqueness != CIRGenItaniumCXXABI::RUK_Unique"); + } else { + typeNameField = + builder.getGlobalViewAttr(builder.getUInt8PtrTy(), typeName); + } + + fields.push_back(typeNameField); + + switch (ty->getTypeClass()) { +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical and dependent types shouldn't get here"); + + // GCC treats vector types as fundamental types. + case Type::Builtin: + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::Complex: + case Type::BlockPointer: + // Itanium C++ ABI 2.9.5p4: + // abi::__fundamental_type_info adds no data members to std::type_info. + break; + + case Type::LValueReference: + case Type::RValueReference: + llvm_unreachable("References shouldn't get here"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("Undeduced type shouldn't get here"); + + case Type::Pipe: + break; + + case Type::BitInt: + break; + + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + case Type::ArrayParameter: + // Itanium C++ ABI 2.9.5p5: + // abi::__array_type_info adds no data members to std::type_info. + break; + + case Type::FunctionNoProto: + case Type::FunctionProto: + // Itanium C++ ABI 2.9.5p5: + // abi::__function_type_info adds no data members to std::type_info. + break; + + case Type::Enum: + // Itanium C++ ABI 2.9.5p5: + // abi::__enum_type_info adds no data members to std::type_info. + break; + + case Type::Record: { + const auto *rd = + cast(cast(ty)->getOriginalDecl()) + ->getDefinitionOrSelf(); + if (!rd->hasDefinition() || !rd->getNumBases()) { + // We don't need to emit any fields. 
+ break; + } + + if (canUseSingleInheritance(rd)) { + buildSIClassTypeInfo(loc, rd); + } else { + buildVMIClassTypeInfo(loc, rd); + } + + break; + } + + case Type::ObjCObject: + case Type::ObjCInterface: + cgm.errorNYI("buildTypeInfo: ObjCObject & ObjCInterface"); + break; + + case Type::ObjCObjectPointer: + cgm.errorNYI("buildTypeInfo: ObjCObjectPointer"); + break; + + case Type::Pointer: + cgm.errorNYI("buildTypeInfo: Pointer"); + break; + + case Type::MemberPointer: + cgm.errorNYI("buildTypeInfo: MemberPointer"); + break; + + case Type::Atomic: + // No fields, at least for the moment. + break; + + case Type::HLSLAttributedResource: + case Type::HLSLInlineSpirv: + llvm_unreachable("HLSL doesn't support RTTI"); + } + + assert(!cir::MissingFeatures::opGlobalDLLImportExport()); + cir::TypeInfoAttr init = builder.getTypeInfo(builder.getArrayAttr(fields)); + + SmallString<256> name; + llvm::raw_svector_ostream out(name); + cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, out); + + // Create new global and search for an existing global. + auto oldGV = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), name)); + + cir::GlobalOp gv = + CIRGenModule::createGlobalOp(cgm, loc, name, init.getType(), + /*isConstant=*/true); + + // Export the typeinfo in the same circumstances as the vtable is + // exported. + if (cgm.getTarget().hasPS4DLLImportExport()) { + cgm.errorNYI("buildTypeInfo: target hasPS4DLLImportExport"); + return {}; + } + + // If there's already an old global variable, replace it with the new one. + if (oldGV) { + // Replace occurrences of the old variable if needed. + gv.setName(oldGV.getName()); + if (!oldGV->use_empty()) { + cgm.errorNYI("buildTypeInfo: old GV !use_empty"); + return {}; + } + oldGV->erase(); + } + + if (cgm.supportsCOMDAT() && cir::isWeakForLinker(gv.getLinkage())) { + assert(!cir::MissingFeatures::setComdat()); + cgm.errorNYI("buildTypeInfo: supportsCOMDAT & isWeakForLinker"); + return {}; + } + + CharUnits align = cgm.getASTContext().toCharUnitsFromBits( + cgm.getTarget().getPointerAlign(LangAS::Default)); + gv.setAlignmentAttr(cgm.getSize(align)); + + // The Itanium ABI specifies that type_info objects must be globally + // unique, with one exception: if the type is an incomplete class + // type or a (possibly indirect) pointer to one. That exception + // affects the general case of comparing type_info objects produced + // by the typeid operator, which is why the comparison operators on + // std::type_info generally use the type_info name pointers instead + // of the object addresses. However, the language's built-in uses + // of RTTI generally require class types to be complete, even when + // manipulating pointers to those class types. This allows the + // implementation of dynamic_cast to rely on address equality tests, + // which is much faster. + + // All of this is to say that it's important that both the type_info + // object and the type_info name be uniqued when weakly emitted. 
+ + mlir::SymbolTable::setSymbolVisibility(typeName, visibility); + assert(!cir::MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::opGlobalPartition()); + assert(!cir::MissingFeatures::setDSOLocal()); + + mlir::SymbolTable::setSymbolVisibility(gv, visibility); + assert(!cir::MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::opGlobalPartition()); + assert(!cir::MissingFeatures::setDSOLocal()); + + CIRGenModule::setInitializer(gv, init); + return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), gv); +} + +mlir::Attribute CIRGenItaniumCXXABI::getAddrOfRTTIDescriptor(mlir::Location loc, + QualType ty) { + return CIRGenItaniumRTTIBuilder(*this, cgm).buildTypeInfo(loc, ty); +} + +/// What sort of uniqueness rules should we use for the RTTI for the +/// given type? +CIRGenItaniumCXXABI::RTTIUniquenessKind +CIRGenItaniumCXXABI::classifyRTTIUniqueness( + QualType canTy, cir::GlobalLinkageKind linkage) const { + if (shouldRTTIBeUnique()) + return RUK_Unique; + + // It's only necessary for linkonce_odr or weak_odr linkage. + if (linkage != cir::GlobalLinkageKind::LinkOnceODRLinkage && + linkage != cir::GlobalLinkageKind::WeakODRLinkage) + return RUK_Unique; + + // It's only necessary with default visibility. + if (canTy->getVisibility() != DefaultVisibility) + return RUK_Unique; + + // If we're not required to publish this symbol, hide it. + if (linkage == cir::GlobalLinkageKind::LinkOnceODRLinkage) + return RUK_NonUniqueHidden; + + // If we're required to publish this symbol, as we might be under an + // explicit instantiation, leave it with default visibility but + // enable string-comparisons. + assert(linkage == cir::GlobalLinkageKind::WeakODRLinkage); + return RUK_NonUniqueVisible; +} + void CIRGenItaniumCXXABI::emitDestructorCall( CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index eef23a0ebda7f..ab709ffa935a8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2171,8 +2171,13 @@ mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc, if (!shouldEmitRTTI(forEh)) return builder.getConstNullPtrAttr(builder.getUInt8PtrTy()); - errorNYI(loc, "getAddrOfRTTIDescriptor"); - return mlir::Attribute(); + if (forEh && ty->isObjCObjectPointerType() && + langOpts.ObjCRuntime.isGNUFamily()) { + errorNYI(loc, "getAddrOfRTTIDescriptor: Objc PtrType & Objc RT GUN"); + return {}; + } + + return getCXXABI().getAddrOfRTTIDescriptor(loc, ty); } // TODO(cir): this can be shared with LLVM codegen. 
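For context on the descriptor selection implemented above (vTableClassNameForType, canUseSingleInheritance, buildVMIClassTypeInfo): the abi::*_type_info class depends only on the shape of the record. A minimal C++ sketch, purely illustrative and not taken from this patch (the class names are invented):

// Illustration only: which abi::*_type_info the RTTI builder selects.
struct Base { virtual ~Base() {} };  // no bases -> __class_type_info
struct Single : Base {};             // one public, non-virtual base at offset
                                     // zero -> __si_class_type_info
struct Virt : virtual Base {};       // virtual base -> __vmi_class_type_info
struct Multi : Single, Virt {};      // multiple bases -> __vmi_class_type_info;
                                     // buildVMIClassTypeInfo then emits __flags,
                                     // __base_count, and one
                                     // __base_class_type_info entry
                                     // (type_info pointer plus
                                     // offset << 8 | access/virtual flags)
                                     // per direct base.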
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 073e8d96b773b..006111d19d65f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -256,6 +256,24 @@ class CIRGenModule : public CIRGenTypeCache { mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType ty, bool forEH = false); + static mlir::SymbolTable::Visibility getMLIRVisibility(Visibility v) { + switch (v) { + case DefaultVisibility: + return mlir::SymbolTable::Visibility::Public; + case HiddenVisibility: + return mlir::SymbolTable::Visibility::Private; + case ProtectedVisibility: + // The distinction between ProtectedVisibility and DefaultVisibility is + // that symbols with ProtectedVisibility, while visible to the dynamic + // linker like DefaultVisibility, are guaranteed to always dynamically + // resolve to a symbol in the current shared object. There is currently no + // equivalent MLIR visibility, so we fall back on the fact that the symbol + // is visible. + return mlir::SymbolTable::Visibility::Public; + } + llvm_unreachable("unknown visibility!"); + } + /// Return a constant array for the given string. mlir::Attribute getConstantArrayFromStringLiteral(const StringLiteral *e); diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index af8f5ae2cc0a5..94d856b41b3ce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -47,6 +47,49 @@ cir::RecordType CIRGenVTables::getVTableType(const VTableLayout &layout) { return cgm.getBuilder().getAnonRecordTy(tys, /*incomplete=*/false); } +/// At this point in the translation unit, does it appear that can we +/// rely on the vtable being defined elsewhere in the program? +/// +/// The response is really only definitive when called at the end of +/// the translation unit. +/// +/// The only semantic restriction here is that the object file should +/// not contain a vtable definition when that vtable is defined +/// strongly elsewhere. Otherwise, we'd just like to avoid emitting +/// vtables when unnecessary. +/// TODO(cir): this should be merged into common AST helper for codegen. +bool CIRGenVTables::isVTableExternal(const CXXRecordDecl *rd) { + assert(rd->isDynamicClass() && "Non-dynamic classes have no VTable."); + + // We always synthesize vtables if they are needed in the MS ABI. MSVC doesn't + // emit them even if there is an explicit template instantiation. + if (cgm.getTarget().getCXXABI().isMicrosoft()) + return false; + + // If we have an explicit instantiation declaration (and not a + // definition), the vtable is defined elsewhere. + TemplateSpecializationKind tsk = rd->getTemplateSpecializationKind(); + if (tsk == TSK_ExplicitInstantiationDeclaration) + return true; + + // Otherwise, if the class is an instantiated template, the + // vtable must be defined here. + if (tsk == TSK_ImplicitInstantiation || + tsk == TSK_ExplicitInstantiationDefinition) + return false; + + // Otherwise, if the class doesn't have a key function (possibly + // anymore), the vtable must be defined here. + const CXXMethodDecl *keyFunction = + cgm.getASTContext().getCurrentKeyFunction(rd); + if (!keyFunction) + return false; + + // Otherwise, if we don't have a definition of the key function, the + // vtable must be defined somewhere else. + return !keyFunction->hasBody(); +} + /// This is a callback from Sema to tell us that a particular vtable is /// required to be emitted in this translation unit. 
/// diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index e19242c651034..9c425ab43b3d9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -100,6 +100,8 @@ class CIRGenVTables { /// is enabled) and the VTT (if the class has virtual bases). void generateClassData(const CXXRecordDecl *rd); + bool isVTableExternal(const clang::CXXRecordDecl *rd); + /// Returns the type of a vtable with the given layout. Normally a struct of /// arrays of pointers, with one struct element for each vtable in the vtable /// group. diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 876948d53010b..bd6d6e3a6ed09 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -222,8 +222,9 @@ class CIRAttrToValue { return llvm::TypeSwitch(attr) .Case([&](auto attrT) { return visitCirAttr(attrT); }) + cir::ConstPtrAttr, cir::GlobalViewAttr, cir::TypeInfoAttr, + cir::VTableAttr, cir::ZeroAttr>( + [&](auto attrT) { return visitCirAttr(attrT); }) .Default([&](auto attrT) { return mlir::Value(); }); } @@ -1694,7 +1695,7 @@ CIRToLLVMGlobalOpLowering::matchAndRewriteRegionInitializedGlobal( // TODO: Generalize this handling when more types are needed here. assert((isa(init))); + cir::TypeInfoAttr, cir::VTableAttr, cir::ZeroAttr>(init))); // TODO(cir): once LLVM's dialect has proper equivalent attributes this // should be updated. For now, we use a custom op to initialize globals @@ -1749,7 +1750,8 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( } else if (mlir::isa(init.value())) { + cir::TypeInfoAttr, cir::VTableAttr, cir::ZeroAttr>( + init.value())) { // TODO(cir): once LLVM's dialect has proper equivalent attributes this // should be updated. For now, we use a custom op to initialize globals // to the appropriate value. diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index 9d88acef91eef..baab972bce696 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -1,9 +1,16 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fno-rtti -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: FileCheck --check-prefixes=CIR-NO-RTTI,CIR-COMMON --input-file=%t.cir %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fno-rtti -fclangir -emit-llvm %s -o %t-cir.ll -// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s +// RUN: FileCheck --check-prefixes=LLVM-NO-RTTI,LLVM-COMMON --input-file=%t-cir.ll %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fno-rtti -emit-llvm %s -o %t.ll -// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s +// RUN: FileCheck --check-prefixes=OGCG-NO-RTTI,OGCG-COMMON --input-file=%t.ll %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefixes=CIR-RTTI,CIR-COMMON --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll +// RUN: FileCheck --check-prefixes=LLVM-RTTI,LLVM-COMMON --input-file=%t-cir.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefixes=OGCG-RTTI,OGCG-COMMON --input-file=%t.ll %s // Note: This test will be expanded to verify VTT emission and VTT implicit // argument handling. For now, it's just test the record layout. 
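To make the new RTTI checks easier to follow, here is a rough sketch of the _ZTI1D object they expect, reconstructed only from the CIR-RTTI/LLVM-RTTI lines later in this test; the struct and field names are borrowed from the Itanium ABI for illustration and are not emitted under these names:

// Illustration: approximate contents of the __vmi_class_type_info for D.
struct D_type_info_sketch {
  const void *vptr;     // _ZTVN10__cxxabiv121__vmi_class_type_infoE, address point +2 slots
  const char *name;     // _ZTS1D, i.e. "1D"
  unsigned flags;       // 2 == diamond-shaped (the virtual base is reached via both B and C)
  unsigned base_count;  // 2 direct bases
  // One __base_class_type_info per direct base: { type_info, offset << 8 | flags }:
  //   B: _ZTI1B, offset 0,  public, non-virtual ->  0 << 8 | 2 == 2
  //   C: _ZTI1C, offset 16, public, non-virtual -> 16 << 8 | 2 == 4098
};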
@@ -39,449 +46,511 @@ void f(D *d) {} // Trigger vtable and VTT emission for D. void D::y() {} -// CIR: !rec_A2Ebase = !cir.record -// CIR: !rec_B2Ebase = !cir.record -// CIR: !rec_C2Ebase = !cir.record -// CIR: !rec_A = !cir.record}> -// CIR: !rec_B = !cir.record, !rec_A2Ebase, !cir.array}> -// CIR: !rec_C = !cir.record -// CIR: !rec_D = !cir.record +// CIR-COMMON: !rec_A2Ebase = !cir.record +// CIR-COMMON: !rec_B2Ebase = !cir.record +// CIR-COMMON: !rec_C2Ebase = !cir.record +// CIR-COMMON: !rec_A = !cir.record}> +// CIR-COMMON: !rec_B = !cir.record, !rec_A2Ebase, !cir.array}> +// CIR-COMMON: !rec_C = !cir.record +// CIR-COMMON: !rec_D = !cir.record -// CIR: !rec_anon_struct = !cir.record x 5>, !cir.array x 4>, !cir.array x 4>}> -// CIR: !rec_anon_struct1 = !cir.record x 4>, !cir.array x 4>}> +// CIR-RTTI: ![[REC_TYPE_INFO_VTABLE:.*]]= !cir.record, !cir.ptr, !u32i, !u32i, !cir.ptr, !s64i, !cir.ptr, !s64i}> +// CIR-COMMON: ![[REC_D_VTABLE:.*]] = !cir.record x 5>, !cir.array x 4>, !cir.array x 4>}> +// CIR-COMMON: ![[REC_B_OR_C_IN_D_VTABLE:.*]]= !cir.record x 4>, !cir.array x 4>}> // Vtable for D -// CIR: cir.global{{.*}} @_ZTV1D = #cir.vtable<{ -// CIR-SAME: #cir.const_array<[ -// CIR-SAME: #cir.ptr<40 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1B1wEv> : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1D1yEv> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 5>, -// CIR-SAME: #cir.const_array<[ -// CIR-SAME: #cir.ptr<24 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr<-16 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1C1xEv> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 4>, -// CIR-SAME: #cir.const_array<[ -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.ptr<-40 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 4> -// CIR-SAME: }> : !rec_anon_struct {alignment = 8 : i64} - -// LLVM: @_ZTV1D = global { [5 x ptr], [4 x ptr], [4 x ptr] } { -// LLVM-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv], -// LLVM-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr null, ptr @_ZN1C1xEv], -// LLVM-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv] -// LLVM-SAME: }, align 8 - -// OGCG: @_ZTV1D = unnamed_addr constant { [5 x ptr], [4 x ptr], [4 x ptr] } { -// OGCG-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv], -// OGCG-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr null, ptr @_ZN1C1xEv], -// OGCG-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv] -// OGCG-SAME: }, align 8 + +// CIR-COMMON: cir.global{{.*}} @_ZTV1D = #cir.vtable<{ +// CIR-COMMON-SAME: #cir.const_array<[ +// CIR-COMMON-SAME: #cir.ptr<40 : i64> : !cir.ptr, +// CIR-COMMON-SAME: #cir.ptr : !cir.ptr, +// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1D> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZN1B1wEv> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZN1D1yEv> : !cir.ptr +// CIR-COMMON-SAME: ]> : !cir.array x 5>, +// CIR-COMMON-SAME: #cir.const_array<[ +// CIR-COMMON-SAME: #cir.ptr<24 : i64> : !cir.ptr, +// CIR-COMMON-SAME: #cir.ptr<-16 : i64> : !cir.ptr, +// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1D> : !cir.ptr, +// 
CIR-COMMON-SAME: #cir.global_view<@_ZN1C1xEv> : !cir.ptr
+// CIR-COMMON-SAME: ]> : !cir.array x 4>,
+// CIR-COMMON-SAME: #cir.const_array<[
+// CIR-COMMON-SAME: #cir.ptr : !cir.ptr,
+// CIR-COMMON-SAME: #cir.ptr<-40 : i64> : !cir.ptr,
+// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr,
+// CIR-RTTI-SAME: #cir.global_view<@_ZTI1D> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr
+// CIR-COMMON-SAME: ]> : !cir.array x 4>
+// CIR-COMMON-SAME: }> : ![[REC_D_VTABLE]] {alignment = 8 : i64}
+
+// LLVM-COMMON: @_ZTV1D = global { [5 x ptr], [4 x ptr], [4 x ptr] } {
+// LLVM-NO-RTTI-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv],
+// LLVM-RTTI-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1D, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv],
+// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr null, ptr @_ZN1C1xEv],
+// LLVM-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr @_ZTI1D, ptr @_ZN1C1xEv],
+// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv]
+// LLVM-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1D, ptr @_ZN1A1vEv]
+// LLVM-COMMON-SAME: }, align 8
+
+// OGCG-COMMON: @_ZTV1D = unnamed_addr constant { [5 x ptr], [4 x ptr], [4 x ptr] } {
+// OGCG-NO-RTTI-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv],
+// OGCG-RTTI-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1D, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv],
+// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr null, ptr @_ZN1C1xEv],
+// OGCG-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr @_ZTI1D, ptr @_ZN1C1xEv],
+// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv]
+// OGCG-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1D, ptr @_ZN1A1vEv]
+// OGCG-COMMON-SAME: }, align 8
 // VTT for D
-// CIR: cir.global{{.*}} @_ZTT1D = #cir.const_array<[
-// CIR-SAME: #cir.global_view<@_ZTV1D, [0 : i32, 3 : i32]> : !cir.ptr,
-// CIR-SAME: #cir.global_view<@_ZTC1D0_1B, [0 : i32, 3 : i32]> : !cir.ptr,
-// CIR-SAME: #cir.global_view<@_ZTC1D0_1B, [1 : i32, 3 : i32]> : !cir.ptr,
-// CIR-SAME: #cir.global_view<@_ZTC1D16_1C, [0 : i32, 3 : i32]> : !cir.ptr,
-// CIR-SAME: #cir.global_view<@_ZTC1D16_1C, [1 : i32, 3 : i32]> : !cir.ptr,
-// CIR-SAME: #cir.global_view<@_ZTV1D, [2 : i32, 3 : i32]> : !cir.ptr,
-// CIR-SAME: #cir.global_view<@_ZTV1D, [1 : i32, 3 : i32]> : !cir.ptr
-// CIR-SAME: ]> : !cir.array x 7> {alignment = 8 : i64}
-
-// LLVM: @_ZTT1D = global [7 x ptr] [
-// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24),
-// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 24),
-// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 56),
-// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 24),
-// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 56),
-// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96),
-// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64)
-// LLVM-SAME: ], align 8
-
-// OGCG: @_ZTT1D = unnamed_addr constant [7 x ptr] [
-// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3),
-// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 0, i32 3),
-// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 1, i32 3),
-// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 0, i32 3),
-// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 1, i32 3),
-// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3),
-// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3)
-// OGCG-SAME: ], align 8
+
+// CIR-COMMON: cir.global{{.*}} @_ZTT1D = #cir.const_array<[
+// CIR-COMMON-SAME: #cir.global_view<@_ZTV1D, [0 : i32, 3 : i32]> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZTC1D0_1B, [0 : i32, 3 : i32]> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZTC1D0_1B, [1 : i32, 3 : i32]> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZTC1D16_1C, [0 : i32, 3 : i32]> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZTC1D16_1C, [1 : i32, 3 : i32]> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZTV1D, [2 : i32, 3 : i32]> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZTV1D, [1 : i32, 3 : i32]> : !cir.ptr
+// CIR-COMMON-SAME: ]> : !cir.array x 7> {alignment = 8 : i64}
+
+// LLVM-COMMON: @_ZTT1D = global [7 x ptr] [
+// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24),
+// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 24),
+// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 56),
+// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 24),
+// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 56),
+// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96),
+// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64)
+// LLVM-COMMON-SAME: ], align 8
+
+// OGCG-COMMON: @_ZTT1D = unnamed_addr constant [7 x ptr] [
+// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3),
+// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 0, i32 3),
+// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 1, i32 3),
+// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 0, i32 3),
+// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 1, i32 3),
+// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3),
+// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3)
+// OGCG-COMMON-SAME: ], align 8
 // Construction vtable for B-in-D
-// CIR: cir.global{{.*}} @_ZTC1D0_1B = #cir.vtable<{
-// CIR-SAME: #cir.const_array<[
-// CIR-SAME: #cir.ptr<40 : i64> : !cir.ptr,
-// CIR-SAME: #cir.ptr : !cir.ptr,
-// CIR-SAME: #cir.ptr : !cir.ptr,
-// CIR-SAME: #cir.global_view<@_ZN1B1wEv> : !cir.ptr
-// CIR-SAME: ]> : !cir.array x 4>,
-// CIR-SAME: #cir.const_array<[
-// CIR-SAME: #cir.ptr : !cir.ptr,
-// CIR-SAME: #cir.ptr<-40 : i64> : !cir.ptr,
-// CIR-SAME: #cir.ptr : !cir.ptr,
-// CIR-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr
-// CIR-SAME: ]> : !cir.array x 4>
-// CIR-SAME: }> : !rec_anon_struct1 {alignment = 8 : i64}
-
-// LLVM: @_ZTC1D0_1B = global { [4 x ptr], [4 x ptr] } {
-// LLVM-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv],
-// LLVM-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv]
-// LLVM-SAME: }, align 8
-
-// OGCG: @_ZTC1D0_1B = unnamed_addr constant { [4 x ptr], [4 x ptr] } {
-// OGCG-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv],
-// OGCG-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv]
-// OGCG-SAME: }, align 8
+
+// CIR-COMMON: cir.global{{.*}} @_ZTC1D0_1B = #cir.vtable<{
+// CIR-COMMON-SAME: #cir.const_array<[
+// CIR-COMMON-SAME: #cir.ptr<40 : i64> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.ptr : !cir.ptr,
+// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr,
+// CIR-RTTI-SAME: #cir.global_view<@_ZTI1B> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZN1B1wEv> : !cir.ptr
+// CIR-COMMON-SAME: ]> : !cir.array x 4>,
+// CIR-COMMON-SAME: #cir.const_array<[
+// CIR-COMMON-SAME: #cir.ptr : !cir.ptr,
+// CIR-COMMON-SAME: #cir.ptr<-40 : i64> : !cir.ptr,
+// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr,
+// CIR-RTTI-SAME: #cir.global_view<@_ZTI1B> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr
+// CIR-COMMON-SAME: ]> : !cir.array x 4>
+// CIR-COMMON-SAME: }> : ![[REC_B_OR_C_IN_D_VTABLE]]
+
+// LLVM-COMMON: @_ZTC1D0_1B = global { [4 x ptr], [4 x ptr] } {
+// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv],
+// LLVM-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1B, ptr @_ZN1B1wEv],
+// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv]
+// LLVM-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1B, ptr @_ZN1A1vEv]
+// LLVM-COMMON-SAME: }, align 8
+
+// OGCG-COMMON: @_ZTC1D0_1B = unnamed_addr constant { [4 x ptr], [4 x ptr] } {
+// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv],
+// OGCG-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1B, ptr @_ZN1B1wEv],
+// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv]
+// OGCG-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1B, ptr @_ZN1A1vEv]
+// OGCG-COMMON-SAME: }, align 8
+
+// CIR-RTTI: cir.global{{.*}} @_ZTI1B : !cir.ptr
+
+// LLVM-RTTI: @_ZTI1B = external global ptr
+
+// OGCG-RTTI: @_ZTI1B = external constant ptr
 // Construction vtable for C-in-D
-// CIR: cir.global{{.*}} @_ZTC1D16_1C = #cir.vtable<{
-// CIR-SAME: #cir.const_array<[
-// CIR-SAME: #cir.ptr<24 : i64> : !cir.ptr,
-// CIR-SAME: #cir.ptr : !cir.ptr,
-// CIR-SAME: #cir.ptr : !cir.ptr,
-// CIR-SAME: #cir.global_view<@_ZN1C1xEv> : !cir.ptr
-// CIR-SAME: ]> : !cir.array x 4>,
-// CIR-SAME: #cir.const_array<[
-// CIR-SAME: #cir.ptr : !cir.ptr,
-// CIR-SAME: #cir.ptr<-24 : i64> : !cir.ptr,
-// CIR-SAME: #cir.ptr : !cir.ptr,
-// CIR-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr
-// CIR-SAME: ]> : !cir.array x 4>
-// CIR-SAME: }> : !rec_anon_struct1 {alignment = 8 : i64}
-
-// LLVM: @_ZTC1D16_1C = global { [4 x ptr], [4 x ptr] } {
-// LLVM-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr null, ptr @_ZN1C1xEv],
-// LLVM-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr null, ptr @_ZN1A1vEv]
-// LLVM-SAME: }, align 8
-
-// OGCG: @_ZTC1D16_1C = unnamed_addr constant { [4 x ptr], [4 x ptr] } {
-// OGCG-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr null, ptr @_ZN1C1xEv],
-// OGCG-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr null, ptr @_ZN1A1vEv]
-// OGCG-SAME: }, align 8
+
+// CIR-COMMON: cir.global{{.*}} @_ZTC1D16_1C = #cir.vtable<{
+// CIR-COMMON-SAME: #cir.const_array<[
+// CIR-COMMON-SAME: #cir.ptr<24 : i64> : !cir.ptr,
+// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr,
+// CIR-RTTI-SAME: #cir.global_view<@_ZTI1C> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZN1C1xEv> : !cir.ptr
+// CIR-COMMON-SAME: ]> : !cir.array x 4>,
+// CIR-COMMON-SAME: #cir.const_array<[
+// CIR-COMMON-SAME: #cir.ptr : !cir.ptr,
+// CIR-COMMON-SAME: #cir.ptr<-24 : i64> : !cir.ptr,
+// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr,
+// CIR-RTTI-SAME: #cir.global_view<@_ZTI1C> : !cir.ptr,
+// CIR-COMMON-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr
+// CIR-COMMON-SAME: ]> : !cir.array x 4>}>
+// CIR-COMMON-SAME: : ![[REC_B_OR_C_IN_D_VTABLE]]
+
+// LLVM-COMMON: @_ZTC1D16_1C = global { [4 x ptr], [4 x ptr] } {
+// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr null, ptr @_ZN1C1xEv],
+// LLVM-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr @_ZTI1C, ptr @_ZN1C1xEv],
+// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr null, ptr @_ZN1A1vEv]
+// LLVM-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr @_ZTI1C, ptr @_ZN1A1vEv]
+// LLVM-COMMON-SAME: }, align 8
+
+// OGCG-COMMON: @_ZTC1D16_1C = unnamed_addr constant { [4 x ptr], [4 x ptr] } {
+// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr null, ptr @_ZN1C1xEv],
+// OGCG-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr @_ZTI1C, ptr @_ZN1C1xEv],
+// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr null, ptr @_ZN1A1vEv]
+// OGCG-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr @_ZTI1C, ptr @_ZN1A1vEv]
+// OGCG-COMMON-SAME: }, align 8
+
+// RTTI class type info for D
+
+// CIR-RTTI: cir.global{{.*}} @_ZTVN10__cxxabiv121__vmi_class_type_infoE : !cir.ptr>
+
+// CIR-RTTI: cir.global{{.*}} @_ZTS1D = #cir.const_array<"1D" : !cir.array> : !cir.array
+
+// CIR-RTTI: cir.global{{.*}} @_ZTI1D = #cir.typeinfo<{
+// CIR-RTTI-SAME: #cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [2 : i32]> : !cir.ptr,
+// CIR-RTTI-SAME: #cir.global_view<@_ZTS1D> : !cir.ptr,
+// CIR-RTTI-SAME: #cir.int<2> : !u32i, #cir.int<2> : !u32i,
+// CIR-RTTI-SAME: #cir.global_view<@_ZTI1B> : !cir.ptr,
+// CIR-RTTI-SAME: #cir.int<2> : !s64i,
+// CIR-RTTI-SAME: #cir.global_view<@_ZTI1C> : !cir.ptr,
+// CIR-RTTI-SAME: #cir.int<4098> : !s64i}> : !rec_anon_struct
+
+// CIR-RTTI: cir.global{{.*}} @_ZTV1A : !rec_anon_struct3
+
+// LLVM-RTTI: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global ptr
+// LLVM-RTTI: @_ZTS1D = global [2 x i8] c"1D", align 1
+
+// LLVM-RTTI: @_ZTI1D = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64 } {
+// LLVM-RTTI-SAME: ptr getelementptr (i8, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 16),
+// LLVM-RTTI-SAME: ptr @_ZTS1D, i32 2, i32 2, ptr @_ZTI1B, i64 2, ptr @_ZTI1C, i64 4098 }
+
+// OGCG-RTTI: @_ZTI1D = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64 } {
+// OGCG-RTTI-SAME: ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2),
+// OGCG-RTTI-SAME: ptr @_ZTS1D, i32 2, i32 2, ptr @_ZTI1B, i64 2, ptr @_ZTI1C, i64 4098 }, align 8
+
+// OGCG-RTTI: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global [0 x ptr]
+// OGCG-RTTI: @_ZTS1D = constant [3 x i8] c"1D\00", align 1
+// OGCG-RTTI: @_ZTV1A = external unnamed_addr constant { [3 x ptr] }, align 8
 D::D() {} // In CIR, this gets emitted after the B and C constructors. See below.
 // Base (C2) constructor for D
-// OGCG: define {{.*}} void @_ZN1DC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
-// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
-// OGCG: %[[VTT_ADDR:.*]] = alloca ptr
-// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
-// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
-// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
-// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
-// OGCG: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
-// OGCG: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} %[[B_VTT]])
-// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
-// OGCG: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 3
-// OGCG: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} %[[C_VTT]])
-// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
-// OGCG: store ptr %[[VPTR]], ptr %[[THIS]]
-// OGCG: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 5
-// OGCG: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]]
-// OGCG: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]]
-// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24
-// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
-// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
-// OGCG: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]]
-// OGCG: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 6
-// OGCG: %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]]
-// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
-// OGCG: store ptr %[[C_VPTR]], ptr %[[C_ADDR]]
-
+// OGCG-COMMON: define {{.*}} void @_ZN1DC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
+// OGCG-COMMON: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG-COMMON: %[[VTT_ADDR:.*]] = alloca ptr
+// OGCG-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// OGCG-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// OGCG-COMMON: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
+// OGCG-COMMON: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} %[[B_VTT]])
+// OGCG-COMMON: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG-COMMON: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 3
+// OGCG-COMMON: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} %[[C_VTT]])
+// OGCG-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// OGCG-COMMON: store ptr %[[VPTR]], ptr %[[THIS]]
+// OGCG-COMMON: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 5
+// OGCG-COMMON: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]]
+// OGCG-COMMON: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]]
+// OGCG-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24
+// OGCG-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// OGCG-COMMON: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// OGCG-COMMON: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]]
+// OGCG-COMMON: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 6
+// OGCG-COMMON: %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]]
+// OGCG-COMMON: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG-COMMON: store ptr %[[C_VPTR]], ptr %[[C_ADDR]]
 // Base (C2) constructor for B
-// CIR: cir.func {{.*}} @_ZN1BC2Ev
-// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr
-// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr>
-// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
-// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
-// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
-// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
-// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
-// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
-// CIR: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr>
-// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
-// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]]
-// CIR: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
-// CIR: cir.store{{.*}} %[[VPTR]], %[[B_VPTR_ADDR]]
-// CIR: %[[B_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr>
-// CIR: %[[B_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[B_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
-// CIR: %[[B_VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]]
-// CIR: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
-// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]]
-// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr
-// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24>
-// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr
-// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr), !cir.ptr
-// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i
-// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr), !cir.ptr
-// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr
-// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr), !cir.ptr
-// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
-// CIR: cir.store{{.*}} %[[B_VPTR]], %[[BASE_VPTR_ADDR]]
-
-// LLVM: define {{.*}} void @_ZN1BC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]])
-// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
-// LLVM: %[[VTT_ADDR:.*]] = alloca ptr
-// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
-// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
-// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
-// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
-// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
-// LLVM: store ptr %[[VPTR]], ptr %[[THIS]]
-// LLVM: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1
-// LLVM: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
-// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
-// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
-// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
-// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
-// LLVM: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
-
-// OGCG: define {{.*}} void @_ZN1BC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
-// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
-// OGCG: %[[VTT_ADDR:.*]] = alloca ptr
-// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
-// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
-// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
-// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
-// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
-// OGCG: store ptr %[[VPTR]], ptr %[[THIS]]
-// OGCG: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
-// OGCG: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
-// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
-// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
-// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
-// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
-// OGCG: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+// CIR-COMMON: cir.func {{.*}} @_ZN1BC2Ev
+// CIR-COMMON-SAME: %[[THIS_ARG:.*]]: !cir.ptr
+// CIR-COMMON-SAME: %[[VTT_ARG:.*]]: !cir.ptr>
+// CIR-COMMON: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR-COMMON: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
+// CIR-COMMON: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR-COMMON: cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
+// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR-COMMON: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
+// CIR-COMMON: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr>
+// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
+// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]]
+// CIR-COMMON: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[B_VPTR_ADDR]]
+// CIR-COMMON: %[[B_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr>
+// CIR-COMMON: %[[B_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[B_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
+// CIR-COMMON: %[[B_VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]]
+// CIR-COMMON: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]]
+// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr
+// CIR-COMMON: %[[CONST_24:.*]] = cir.const #cir.int<-24>
+// CIR-COMMON: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr
+// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr), !cir.ptr
+// CIR-COMMON: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i
+// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr), !cir.ptr
+// CIR-COMMON: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr
+// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr), !cir.ptr
+// CIR-COMMON: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
+// CIR-COMMON: cir.store{{.*}} %[[B_VPTR]], %[[BASE_VPTR_ADDR]]
+
+// LLVM-COMMON: define {{.*}} void @_ZN1BC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]])
+// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM-COMMON: %[[VTT_ADDR:.*]] = alloca ptr
+// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// LLVM-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// LLVM-COMMON: store ptr %[[VPTR]], ptr %[[THIS]]
+// LLVM-COMMON: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1
+// LLVM-COMMON: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// LLVM-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// LLVM-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// LLVM-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// LLVM-COMMON: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// LLVM-COMMON: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+
+// OGCG-COMMON: define {{.*}} void @_ZN1BC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
+// OGCG-COMMON: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG-COMMON: %[[VTT_ADDR:.*]] = alloca ptr
+// OGCG-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// OGCG-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// OGCG-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// OGCG-COMMON: store ptr %[[VPTR]], ptr %[[THIS]]
+// OGCG-COMMON: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
+// OGCG-COMMON: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// OGCG-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// OGCG-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// OGCG-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// OGCG-COMMON: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// OGCG-COMMON: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
 // Base (C2) constructor for C
-// CIR: cir.func {{.*}} @_ZN1CC2Ev
-// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr
-// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr>
-// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
-// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
-// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
-// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
-// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
-// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
-// CIR: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr>
-// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
-// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]]
-// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
-// CIR: cir.store{{.*}} %[[VPTR]], %[[C_VPTR_ADDR]]
-// CIR: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr>
-// CIR: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
-// CIR: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]]
-// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
-// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]]
-// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr
-// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24>
-// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr
-// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr), !cir.ptr
-// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i
-// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr), !cir.ptr
-// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr
-// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr), !cir.ptr
-// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
-// CIR: cir.store{{.*}} %[[C_VPTR]], %[[BASE_VPTR_ADDR]]
-
-// LLVM: define {{.*}} void @_ZN1CC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]])
-// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
-// LLVM: %[[VTT_ADDR:.*]] = alloca ptr
-// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
-// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
-// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
-// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
-// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
-// LLVM: store ptr %[[VPTR]], ptr %[[THIS]]
-// LLVM: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1
-// LLVM: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
-// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
-// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
-// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
-// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
-// LLVM: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
-
-// OGCG: define {{.*}} void @_ZN1CC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
-// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
-// OGCG: %[[VTT_ADDR:.*]] = alloca ptr
-// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
-// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
-// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
-// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
-// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
-// OGCG: store ptr %[[VPTR]], ptr %[[THIS]]
-// OGCG: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
-// OGCG: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
-// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
-// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
-// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
-// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
-// OGCG: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+// CIR-COMMON: cir.func {{.*}} @_ZN1CC2Ev
+// CIR-COMMON-SAME: %[[THIS_ARG:.*]]: !cir.ptr
+// CIR-COMMON-SAME: %[[VTT_ARG:.*]]: !cir.ptr>
+// CIR-COMMON: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR-COMMON: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
+// CIR-COMMON: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR-COMMON: cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
+// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR-COMMON: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
+// CIR-COMMON: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr>
+// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
+// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]]
+// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[C_VPTR_ADDR]]
+// CIR-COMMON: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr>
+// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
+// CIR-COMMON: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]]
+// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]]
+// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr
+// CIR-COMMON: %[[CONST_24:.*]] = cir.const #cir.int<-24>
+// CIR-COMMON: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr
+// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr), !cir.ptr
+// CIR-COMMON: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i
+// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr), !cir.ptr
+// CIR-COMMON: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr
+// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr), !cir.ptr
+// CIR-COMMON: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
+// CIR-COMMON: cir.store{{.*}} %[[C_VPTR]], %[[BASE_VPTR_ADDR]]
+
+// LLVM-COMMON: define {{.*}} void @_ZN1CC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]])
+// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM-COMMON: %[[VTT_ADDR:.*]] = alloca ptr
+// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// LLVM-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// LLVM-COMMON: store ptr %[[VPTR]], ptr %[[THIS]]
+// LLVM-COMMON: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1
+// LLVM-COMMON: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// LLVM-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// LLVM-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// LLVM-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// LLVM-COMMON: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// LLVM-COMMON: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+
+// OGCG-COMMON: define {{.*}} void @_ZN1CC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
+// OGCG-COMMON: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG-COMMON: %[[VTT_ADDR:.*]] = alloca ptr
+// OGCG-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// OGCG-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// OGCG-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// OGCG-COMMON: store ptr %[[VPTR]], ptr %[[THIS]]
+// OGCG-COMMON: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
+// OGCG-COMMON: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// OGCG-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// OGCG-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// OGCG-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// OGCG-COMMON: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// OGCG-COMMON: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
 // Base (C2) constructor for D
-// CIR: cir.func {{.*}} @_ZN1DC2Ev
-// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr
-// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr>
-// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
-// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
-// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
-// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
-// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
-// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
-// CIR: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [0] -> !cir.ptr
-// CIR: %[[B_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr>
-// CIR: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> ()
-// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr
-// CIR: %[[C_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 3 -> !cir.ptr>
-// CIR: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> ()
-// CIR: %[[D_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr>
-// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT]] : !cir.ptr>), !cir.ptr
-// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] : !cir.ptr, !cir.vptr
-// CIR: %[[D_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
-// CIR: cir.store{{.*}} %[[VPTR]], %[[D_VPTR_ADDR]]
-// CIR: %[[D_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 5 -> !cir.ptr>
-// CIR: %[[D_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
-// CIR: %[[D_VPTR:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR]] : !cir.ptr, !cir.vptr
-// CIR: %[[D_VPTR_ADDR2:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr
-// CIR: %[[VPTR2:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR2]] : !cir.ptr, !cir.vptr
-// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR2]] : !cir.vptr), !cir.ptr
-// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24> : !s64i
-// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr
-// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr), !cir.ptr
-// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i
-// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr), !cir.ptr
-// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr
-// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr), !cir.ptr
-// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
-// CIR: cir.store{{.*}} %[[D_VPTR]], %[[BASE_VPTR_ADDR]]
-// CIR: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 6 -> !cir.ptr>
-// CIR: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
-// CIR: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] : !cir.ptr, !cir.vptr
-// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr
-// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr -> !cir.ptr
-// CIR: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr
-
-// LLVM: define {{.*}} void @_ZN1DC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) {
-// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
-// LLVM: %[[VTT_ADDR:.*]] = alloca ptr
-// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
-// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
-// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
-// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
-// LLVM: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1
-// LLVM: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr %[[B_VTT]])
-// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
-// LLVM: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 3
-// LLVM: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr %[[C_VTT]])
-// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
-// LLVM: store ptr %[[VPTR]], ptr %[[THIS]]
-// LLVM: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 5
-// LLVM: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]]
-// LLVM: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]]
-// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24
-// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
-// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
-// LLVM: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]]
-// LLVM: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 6
-// LLVM: %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]]
-// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
-// LLVM: store ptr %[[C_VPTR]], ptr %[[C_ADDR]]
+// CIR-COMMON: cir.func {{.*}} @_ZN1DC2Ev
+// CIR-COMMON-SAME: %[[THIS_ARG:.*]]: !cir.ptr
+// CIR-COMMON-SAME: %[[VTT_ARG:.*]]: !cir.ptr>
+// CIR-COMMON: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR-COMMON: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
+// CIR-COMMON: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR-COMMON: cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
+// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR-COMMON: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
+// CIR-COMMON: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [0] -> !cir.ptr
+// CIR-COMMON: %[[B_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr>
+// CIR-COMMON: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> ()
+// CIR-COMMON: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr
+// CIR-COMMON: %[[C_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 3 -> !cir.ptr>
+// CIR-COMMON: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> ()
+// CIR-COMMON: %[[D_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr>
+// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT]] : !cir.ptr>), !cir.ptr
+// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] : !cir.ptr, !cir.vptr
+// CIR-COMMON: %[[D_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[D_VPTR_ADDR]]
+// CIR-COMMON: %[[D_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 5 -> !cir.ptr>
+// CIR-COMMON: %[[D_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
+// CIR-COMMON: %[[D_VPTR:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR]] : !cir.ptr, !cir.vptr
+// CIR-COMMON: %[[D_VPTR_ADDR2:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr
+// CIR-COMMON: %[[VPTR2:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR2]] : !cir.ptr, !cir.vptr
+// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR2]] : !cir.vptr), !cir.ptr
+// CIR-COMMON: %[[CONST_24:.*]] = cir.const #cir.int<-24> : !s64i
+// CIR-COMMON: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr
+// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr), !cir.ptr
+// CIR-COMMON: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i
+// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr), !cir.ptr
+// CIR-COMMON: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr
+// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr), !cir.ptr
+// CIR-COMMON: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
+// CIR-COMMON: cir.store{{.*}} %[[D_VPTR]], %[[BASE_VPTR_ADDR]]
+// CIR-COMMON: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 6 -> !cir.ptr>
+// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr
+// CIR-COMMON: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] : !cir.ptr, !cir.vptr
+// CIR-COMMON: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr
+// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr -> !cir.ptr
+// CIR-COMMON: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr
+
+// LLVM-COMMON: define {{.*}} void @_ZN1DC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) {
+// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM-COMMON: %[[VTT_ADDR:.*]] = alloca ptr
+// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// LLVM-COMMON: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1
+// LLVM-COMMON: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr %[[B_VTT]])
+// LLVM-COMMON: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
+// LLVM-COMMON: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 3
+// LLVM-COMMON: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr %[[C_VTT]])
+// LLVM-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// LLVM-COMMON: store ptr %[[VPTR]], ptr %[[THIS]]
+// LLVM-COMMON: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 5
+// LLVM-COMMON: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]]
+// LLVM-COMMON: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]]
+// LLVM-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24
+// LLVM-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// LLVM-COMMON: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// LLVM-COMMON: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]]
+// LLVM-COMMON: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 6
+// LLVM-COMMON: %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]]
+// LLVM-COMMON: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
+// LLVM-COMMON: store ptr %[[C_VPTR]], ptr %[[C_ADDR]]
 // The C2 constructor for D gets emitted earlier in OGCG, see above.
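// For orientation: judging from the mangled names (@_ZN1A1vEv, @_ZN1B1wEv,
// @_ZN1C1xEv, @_ZN1D1yEv), the VTT layout, and the virtual-base offsets
// checked above, the hierarchy being exercised is presumably a virtual
// diamond along the following lines. This is an inference, not a quote of
// the test source, and the data members that produce the 16/24/40-byte
// subobject offsets are omitted.
struct A { virtual void v(); };
struct B : virtual A { virtual void w(); };
struct C : virtual A { virtual void x(); };
struct D : B, C { D(); virtual void y(); };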
 // Base (C2) constructor for A
-// CIR: cir.func {{.*}} @_ZN1AC2Ev
-// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr
-// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
-// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
-// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
-// CIR: %[[VPTR:.*]] = cir.vtable.address_point(@_ZTV1A, address_point = ) : !cir.vptr
-// CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr
-// CIR: cir.store{{.*}} %[[VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr
-
-// LLVM: define {{.*}} void @_ZN1AC2Ev(ptr %[[THIS_ARG:.*]]) {
-// LLVM: %[[THIS_ADDR:.*]] = alloca ptr, i64 1, align 8
-// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]], align 8
-// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
-// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1A, i64 16), ptr %[[THIS]]
+// CIR-COMMON: cir.func {{.*}} @_ZN1AC2Ev
+// CIR-COMMON-SAME: %[[THIS_ARG:.*]]: !cir.ptr
+// CIR-COMMON: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR-COMMON: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR-COMMON: %[[VPTR:.*]] = cir.vtable.address_point(@_ZTV1A, address_point = ) : !cir.vptr
+// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr
+// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr
+
+// LLVM-COMMON: define {{.*}} void @_ZN1AC2Ev(ptr %[[THIS_ARG:.*]]) {
+// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]], align 8
+// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
+// LLVM-COMMON: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1A, i64 16), ptr %[[THIS]]
 // The C2 constructor for A gets emitted later in OGCG, see below.
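// The pattern the @_ZN1BC2Ev/@_ZN1CC2Ev checks above encode, written out as a
// rough C++-level sketch: a base-object constructor for a class with a
// virtual base installs its own vptr from the VTT slot handed in by the
// caller, then reads the virtual-base offset stored 24 bytes before the
// address point and patches the virtual A base's vptr through it. A, which
// has no virtual bases, simply stores the static address point of @_ZTV1A.
// Names below are illustrative only; this is not the generated code.
#include <cstdint>
#include <cstring>

void b_base_ctor_sketch(void *thisPtr, void *const *vtt) {
  void *vptr = vtt[0];                       // vptr for the B subobject
  std::memcpy(thisPtr, &vptr, sizeof vptr);  // install it at offset 0

  void *aVptr = vtt[1];                      // vptr for the virtual A base
  std::int64_t vbaseOffset;                  // lives 24 bytes before the address point
  std::memcpy(&vbaseOffset, static_cast<char *>(vptr) - 24, sizeof vbaseOffset);

  void *aBase = static_cast<char *>(thisPtr) + vbaseOffset;
  std::memcpy(aBase, &aVptr, sizeof aVptr);  // install the A vptr
}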
 // Complete (C1) constructor for D
-// CIR: cir.func {{.*}} @_ZN1DC1Ev
-// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr
-// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
-// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
-// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
-// CIR: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [40] -> !cir.ptr
-// CIR: cir.call @_ZN1AC2Ev(%[[A_ADDR]]) nothrow : (!cir.ptr) -> ()
-// CIR: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [0] -> !cir.ptr
-// CIR: %[[B_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 1 -> !cir.ptr>
-// CIR: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> ()
-// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr
-// CIR: %[[C_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 3 -> !cir.ptr>
-// CIR: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> ()
-// CIR: %[[D_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr
-// CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr
-// CIR: cir.store{{.*}} %[[D_VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr
-// CIR: %[[A_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr
-// CIR: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [40] -> !cir.ptr
-// CIR: %[[A_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[A_ADDR]] : !cir.ptr -> !cir.ptr
-// CIR: cir.store{{.*}} %[[A_VPTR]], %[[A_VPTR_ADDR]] : !cir.vptr, !cir.ptr
-// CIR: %[[C_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr
-// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr
-// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr -> !cir.ptr
-// CIR: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr
-
-// LLVM: define {{.*}} void @_ZN1DC1Ev(ptr %[[THIS_ARG:.*]])
-// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
-// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
-// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
-// LLVM: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40
-// LLVM: call void @_ZN1AC2Ev(ptr %[[A_ADDR]])
-// LLVM: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 8))
-// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
-// LLVM: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 24))
-// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24), ptr %[[THIS]]
-// LLVM: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40
-// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96), ptr %[[A_ADDR]]
-// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
-// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64), ptr %[[C_ADDR]]
-
-// OGCG: define {{.*}} void @_ZN1DC1Ev(ptr {{.*}} %[[THIS_ARG:.*]])
-// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
-// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
-// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
-// OGCG: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40
-// OGCG: call void @_ZN1AC2Ev(ptr {{.*}} %[[A_ADDR]])
-// OGCG: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 1))
-// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
-// OGCG: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 3))
-// OGCG: store ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3), ptr %[[THIS]]
-// OGCG: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40
-// OGCG: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3), ptr %[[A_ADDR]]
-// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
-// OGCG: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3), ptr %[[C_ADDR]]
-
-// OGCG: define {{.*}} void @_ZN1AC2Ev(ptr {{.*}} %[[THIS_ARG:.*]])
-// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
-// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
-// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
-// OGCG: store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i32 0, i32 0, i32 2), ptr %[[THIS]]
+// CIR-COMMON: cir.func {{.*}} @_ZN1DC1Ev
+// CIR-COMMON-SAME: %[[THIS_ARG:.*]]: !cir.ptr
+// CIR-COMMON: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR-COMMON: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR-COMMON: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [40] -> !cir.ptr
+// CIR-COMMON: cir.call @_ZN1AC2Ev(%[[A_ADDR]]) nothrow : (!cir.ptr) -> ()
+// CIR-COMMON: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [0] -> !cir.ptr
+// CIR-COMMON: %[[B_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 1 -> !cir.ptr>
+// CIR-COMMON: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> ()
+// CIR-COMMON: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr
+// CIR-COMMON: %[[C_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 3 -> !cir.ptr>
+// CIR-COMMON: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> ()
+// CIR-COMMON: %[[D_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr
+// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr
+// CIR-COMMON: cir.store{{.*}} %[[D_VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr
+// CIR-COMMON: %[[A_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr
+// CIR-COMMON: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [40] -> !cir.ptr
+// CIR-COMMON: %[[A_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[A_ADDR]] : !cir.ptr -> !cir.ptr
+// CIR-COMMON: cir.store{{.*}} %[[A_VPTR]], %[[A_VPTR_ADDR]] : !cir.vptr, !cir.ptr
+// CIR-COMMON: %[[C_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr
+// CIR-COMMON: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr
+// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr -> !cir.ptr
+// CIR-COMMON: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr
+
+// LLVM-COMMON: define {{.*}} void @_ZN1DC1Ev(ptr %[[THIS_ARG:.*]])
+// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM-COMMON: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40
+// LLVM-COMMON: call void @_ZN1AC2Ev(ptr %[[A_ADDR]])
+// LLVM-COMMON: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 8))
+// LLVM-COMMON: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
+// LLVM-COMMON: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 24))
+// LLVM-COMMON: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24), ptr %[[THIS]]
+// LLVM-COMMON: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40
+// LLVM-COMMON: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96), ptr %[[A_ADDR]]
+// LLVM-COMMON: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
+// LLVM-COMMON: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64), ptr %[[C_ADDR]]
+
+// OGCG-COMMON: define {{.*}} void @_ZN1DC1Ev(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG-COMMON: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG-COMMON: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40
+// OGCG-COMMON: call void @_ZN1AC2Ev(ptr {{.*}} %[[A_ADDR]])
+// OGCG-COMMON: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 1))
+// OGCG-COMMON: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG-COMMON: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 3))
+// OGCG-COMMON: store ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3), ptr %[[THIS]]
+// OGCG-COMMON: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40
+// OGCG-COMMON: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3), ptr %[[A_ADDR]]
+// OGCG-COMMON: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG-COMMON: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3), ptr %[[C_ADDR]]
+
+// OGCG-COMMON: define {{.*}} void @_ZN1AC2Ev(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG-COMMON: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG-COMMON: store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i32 0, i32 0, i32 2), ptr %[[THIS]]
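// A note on the two i64 values checked in @_ZTI1D above: in the Itanium ABI's
// abi::__vmi_class_type_info, each base entry carries __offset_flags, with the
// base offset in the bits above the low 8 flag bits and 0x2 marking a public
// base. The checked constants 2 and 4098 therefore decode to (offset 0,
// public) for B and (offset 16, public) for C, matching the construction
// vtable offsets. The helper below is hypothetical and only illustrates the
// decoding; it is not part of the patch.
#include <cstdint>
#include <utility>

constexpr std::pair<std::int64_t, unsigned> decodeOffsetFlags(std::int64_t offsetFlags) {
  return {offsetFlags >> 8, static_cast<unsigned>(offsetFlags & 0xff)};
}

static_assert(decodeOffsetFlags(2).first == 0);      // B sits at offset 0
static_assert(decodeOffsetFlags(2).second == 0x2);   // public base
static_assert(decodeOffsetFlags(4098).first == 16);  // C sits at offset 16
static_assert(decodeOffsetFlags(4098).second == 0x2);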