diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index a6f10e6bb9e9c..84acc74ccf0f5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -519,6 +519,14 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     return createGlobal(module, loc, uniqueName, type, isConstant, linkage);
   }
 
+  cir::StackSaveOp createStackSave(mlir::Location loc, mlir::Type ty) {
+    return cir::StackSaveOp::create(*this, loc, ty);
+  }
+
+  cir::StackRestoreOp createStackRestore(mlir::Location loc, mlir::Value v) {
+    return cir::StackRestoreOp::create(*this, loc, v);
+  }
+
   mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
                                 Address dstAddr, mlir::Type storageType,
                                 mlir::Value src, const CIRGenBitFieldInfo &info,
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 039d29033ea87..4a19d91dcf4fa 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -44,38 +44,70 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d,
 
   // If the type is variably-modified, emit all the VLA sizes for it.
   if (ty->isVariablyModifiedType())
-    cgm.errorNYI(d.getSourceRange(), "emitAutoVarDecl: variably modified type");
+    emitVariablyModifiedType(ty);
 
   assert(!cir::MissingFeatures::openMP());
 
   Address address = Address::invalid();
-  if (!ty->isConstantSizeType())
-    cgm.errorNYI(d.getSourceRange(), "emitAutoVarDecl: non-constant size type");
-
-  // A normal fixed sized variable becomes an alloca in the entry block,
-  // unless:
-  // - it's an NRVO variable.
-  // - we are compiling OpenMP and it's an OpenMP local variable.
-  if (nrvo) {
-    // The named return value optimization: allocate this variable in the
-    // return slot, so that we can elide the copy when returning this
-    // variable (C++0x [class.copy]p34).
-    address = returnValue;
-
-    if (const RecordDecl *rd = ty->getAsRecordDecl()) {
-      if (const auto *cxxrd = dyn_cast<CXXRecordDecl>(rd);
-          (cxxrd && !cxxrd->hasTrivialDestructor()) ||
-          rd->isNonTrivialToPrimitiveDestroy())
-        cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: set NRVO flag");
+  if (ty->isConstantSizeType()) {
+    // A normal fixed sized variable becomes an alloca in the entry block,
+    // unless:
+    // - it's an NRVO variable.
+    // - we are compiling OpenMP and it's an OpenMP local variable.
+    if (nrvo) {
+      // The named return value optimization: allocate this variable in the
+      // return slot, so that we can elide the copy when returning this
+      // variable (C++0x [class.copy]p34).
+      address = returnValue;
+
+      if (const RecordDecl *rd = ty->getAsRecordDecl()) {
+        if (const auto *cxxrd = dyn_cast<CXXRecordDecl>(rd);
+            (cxxrd && !cxxrd->hasTrivialDestructor()) ||
+            rd->isNonTrivialToPrimitiveDestroy())
+          cgm.errorNYI(d.getSourceRange(), "emitAutoVarAlloca: set NRVO flag");
+      }
+    } else {
+      // A normal fixed sized variable becomes an alloca in the entry block,
+      mlir::Type allocaTy = convertTypeForMem(ty);
+      // Create the temp alloca and declare variable using it.
+      address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
+                                 /*arraySize=*/nullptr, /*alloca=*/nullptr, ip);
+      declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()),
+              alignment);
+    }
   } else {
-    // A normal fixed sized variable becomes an alloca in the entry block,
-    mlir::Type allocaTy = convertTypeForMem(ty);
-    // Create the temp alloca and declare variable using it.
-    address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
-                               /*arraySize=*/nullptr, /*alloca=*/nullptr, ip);
-    declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()),
-            alignment);
+    // Non-constant size type
+    assert(!cir::MissingFeatures::openMP());
+    if (!didCallStackSave) {
+      // Save the stack.
+      cir::PointerType defaultTy = AllocaInt8PtrTy;
+      CharUnits align = CharUnits::fromQuantity(
+          cgm.getDataLayout().getAlignment(defaultTy, false));
+      Address stack = createTempAlloca(defaultTy, align, loc, "saved_stack");
+
+      mlir::Value v = builder.createStackSave(loc, defaultTy);
+      assert(v.getType() == AllocaInt8PtrTy);
+      builder.createStore(loc, v, stack);
+
+      didCallStackSave = true;
+
+      // Push a cleanup block and restore the stack there.
+      // FIXME: in general circumstances, this should be an EH cleanup.
+      pushStackRestore(NormalCleanup, stack);
+    }
+
+    VlaSizePair vlaSize = getVLASize(ty);
+    mlir::Type memTy = convertTypeForMem(vlaSize.type);
+
+    // Allocate memory for the array.
+    address =
+        createTempAlloca(memTy, alignment, loc, d.getName(), vlaSize.numElts,
+                         /*alloca=*/nullptr, builder.saveInsertionPoint());
+
+    // If we have debug info enabled, properly describe the VLA dimensions for
+    // this type by registering the vla size expression for each of the
+    // dimensions.
+    assert(!cir::MissingFeatures::generateDebugInfo());
   }
 
   emission.addr = address;
@@ -696,6 +728,16 @@ struct DestroyObject final : EHScopeStack::Cleanup {
     cgf.emitDestroy(addr, type, destroyer);
   }
 };
+
+struct CallStackRestore final : EHScopeStack::Cleanup {
+  Address stack;
+  CallStackRestore(Address stack) : stack(stack) {}
+  void emit(CIRGenFunction &cgf) override {
+    mlir::Location loc = stack.getPointer().getLoc();
+    mlir::Value v = cgf.getBuilder().createLoad(loc, stack);
+    cgf.getBuilder().createStackRestore(loc, v);
+  }
+};
 } // namespace
 
 void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
@@ -805,6 +847,10 @@ CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
   llvm_unreachable("Unknown DestructionKind");
 }
 
+void CIRGenFunction::pushStackRestore(CleanupKind kind, Address spMem) {
+  ehStack.pushCleanup<CallStackRestore>(kind, spMem);
+}
+
 /// Enter a destroy cleanup for the given local variable.
 void CIRGenFunction::emitAutoVarTypeCleanup(
     const CIRGenFunction::AutoVarEmission &emission,
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index f416571181153..4897c29b58a1f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -2068,7 +2068,7 @@ mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
     mlir::OpBuilder::InsertionGuard guard(builder);
     builder.restoreInsertionPoint(ip);
     addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
-                                /*var type*/ ty, name, alignIntAttr);
+                                /*var type*/ ty, name, alignIntAttr, arraySize);
     assert(!cir::MissingFeatures::astVarDeclInterface());
   }
   return addr;
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 01a43a997637c..ba36cbeeb620e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -410,6 +410,8 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
   curFn = fn;
 
   const Decl *d = gd.getDecl();
+
+  didCallStackSave = false;
   curCodeDecl = d;
   const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
   curFuncDecl = d->getNonClosureContext();
@@ -1006,6 +1008,41 @@ mlir::Value CIRGenFunction::emitAlignmentAssumption(
       offsetValue);
 }
 
+CIRGenFunction::VlaSizePair CIRGenFunction::getVLASize(QualType type) {
+  const VariableArrayType *vla =
+      cgm.getASTContext().getAsVariableArrayType(type);
+  assert(vla && "type was not a variable array type!");
+  return getVLASize(vla);
+}
+
+CIRGenFunction::VlaSizePair
+CIRGenFunction::getVLASize(const VariableArrayType *type) {
+  // The number of elements so far; always size_t.
+  mlir::Value numElements;
+
+  QualType elementType;
+  do {
+    elementType = type->getElementType();
+    mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
+    assert(vlaSize && "no size for VLA!");
+    assert(vlaSize.getType() == SizeTy);
+
+    if (!numElements) {
+      numElements = vlaSize;
+    } else {
+      // It's undefined behavior if this wraps around, so mark it that way.
+      // FIXME: Teach -fsanitize=undefined to trap this.
+
+      numElements =
+          builder.createMul(numElements.getLoc(), numElements, vlaSize,
+                            cir::OverflowBehavior::NoUnsignedWrap);
+    }
+  } while ((type = getContext().getAsVariableArrayType(elementType)));
+
+  assert(numElements && "Undefined elements number");
+  return {numElements, elementType};
+}
+
 // TODO(cir): Most of this function can be shared between CIRGen
 // and traditional LLVM codegen
 void CIRGenFunction::emitVariablyModifiedType(QualType type) {
@@ -1086,7 +1123,26 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
     break;
 
   case Type::VariableArray: {
-    cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
+    // Losing element qualification here is fine.
+    const VariableArrayType *vat = cast<VariableArrayType>(ty);
+
+    // Unknown size indication requires no size computation.
+    // Otherwise, evaluate and record it.
+    if (const Expr *sizeExpr = vat->getSizeExpr()) {
+      // It's possible that we might have emitted this already,
+      // e.g. with a typedef and a pointer to it.
+      mlir::Value &entry = vlaSizeMap[sizeExpr];
+      if (!entry) {
+        mlir::Value size = emitScalarExpr(sizeExpr);
+        assert(!cir::MissingFeatures::sanitizers());
+
+        // Always zexting here would be wrong if it weren't
+        // undefined behavior to have a negative bound.
+        // FIXME: What about when size's type is larger than size_t?
+        entry = builder.createIntCast(size, SizeTy);
+      }
+    }
+    type = vat->getElementType();
     break;
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index d71de2ffde6a1..0d64c31f01668 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -149,6 +149,10 @@ class CIRGenFunction : public CIRGenTypeCache {
   using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
   SymTableTy symbolTable;
 
+  /// Whether a cir.stacksave operation has been added. Used to avoid
+  /// inserting cir.stacksave for multiple VLAs in the same scope.
+  bool didCallStackSave = false;
+
   /// Whether or not a Microsoft-style asm block has been processed within
   /// this function. These can potentially set the return value.
   bool sawAsmBlock = false;
@@ -188,6 +192,14 @@ class CIRGenFunction : public CIRGenTypeCache {
   llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
   llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
 
+  // This keeps track of the associated size for each VLA type.
+  // We track this by the size expression rather than the type itself because
+  // in certain situations, like a const qualifier applied to a VLA typedef,
+  // multiple VLA types can share the same size expression.
+  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+  // enter/leave scopes.
+  llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
+
 public:
   /// A non-RAII class containing all the information about a bound
   /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
@@ -436,6 +448,20 @@ class CIRGenFunction : public CIRGenTypeCache {
     }
   };
 
+  struct VlaSizePair {
+    mlir::Value numElts;
+    QualType type;
+
+    VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
+  };
+
+  /// Returns an mlir::Value+QualType pair that corresponds to the size,
+  /// in non-variably-sized elements, of a variable length array type,
+  /// plus that largest non-variably-sized element type. Assumes that
+  /// the type has already been emitted with emitVariablyModifiedType.
+  VlaSizePair getVLASize(const VariableArrayType *type);
+  VlaSizePair getVLASize(QualType type);
+
   void finishFunction(SourceLocation endLoc);
 
   /// Determine whether the given initializer is trivial in the sense
@@ -583,6 +609,8 @@ class CIRGenFunction : public CIRGenTypeCache {
     return needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup;
   }
 
+  void pushStackRestore(CleanupKind kind, Address spMem);
+
   /// Set the address of a local variable.
   void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
     assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
@@ -854,6 +882,7 @@ class CIRGenFunction : public CIRGenTypeCache {
 
   protected:
     bool performCleanup;
+    bool oldDidCallStackSave;
 
   private:
     RunCleanupsScope(const RunCleanupsScope &) = delete;
@@ -867,6 +896,8 @@ class CIRGenFunction : public CIRGenTypeCache {
     explicit RunCleanupsScope(CIRGenFunction &cgf)
         : performCleanup(true), cgf(cgf) {
       cleanupStackDepth = cgf.ehStack.stable_begin();
+      oldDidCallStackSave = cgf.didCallStackSave;
+      cgf.didCallStackSave = false;
       oldCleanupStackDepth = cgf.currentCleanupStackDepth;
       cgf.currentCleanupStackDepth = cleanupStackDepth;
     }
@@ -883,6 +914,7 @@ class CIRGenFunction : public CIRGenTypeCache {
       assert(performCleanup && "Already forced cleanup");
       {
         mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+        cgf.didCallStackSave = oldDidCallStackSave;
         cgf.popCleanupBlocks(cleanupStackDepth);
         performCleanup = false;
         cgf.currentCleanupStackDepth = oldCleanupStackDepth;
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index fe1ea5617b8cd..622feae9dece6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -88,6 +88,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
   FP80Ty = cir::FP80Type::get(&getMLIRContext());
   FP128Ty = cir::FP128Type::get(&getMLIRContext());
 
+  AllocaInt8PtrTy = cir::PointerType::get(UInt8Ty, cirAllocaAddressSpace);
+
   PointerAlignInBytes =
       astContext
           .toCharUnitsFromBits(
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index 273ec7f06b4b5..b5612d9127506 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -65,6 +65,9 @@ struct CIRGenTypeCache {
   cir::PointerType VoidPtrTy;
   cir::PointerType UInt8PtrTy;
 
+  /// void* in alloca address space
+  cir::PointerType AllocaInt8PtrTy;
+
   /// The size and alignment of a pointer into the generic address space.
   union {
     unsigned char PointerAlignInBytes;
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
index 2ab1ea0c8ff8e..3cfd15c978e1f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -421,6 +421,16 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
     break;
   }
 
+  case Type::VariableArray: {
+    const VariableArrayType *a = cast<VariableArrayType>(ty);
+    if (a->getIndexTypeCVRQualifiers() != 0)
+      cgm.errorNYI(SourceLocation(), "non trivial array types", type);
+    // VLAs resolve to the innermost element type; this matches
+    // the return of alloca, and there isn't any obviously better choice.
+    resultType = convertTypeForMem(a->getElementType());
+    break;
+  }
+
   case Type::IncompleteArray: {
     const IncompleteArrayType *arrTy = cast<IncompleteArrayType>(ty);
     if (arrTy->getIndexTypeCVRQualifiers() != 0)
diff --git a/clang/test/CIR/CodeGen/vla.c b/clang/test/CIR/CodeGen/vla.c
new file mode 100644
index 0000000000000..e2adf457c7f69
--- /dev/null
+++ b/clang/test/CIR/CodeGen/vla.c
@@ -0,0 +1,285 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+void f0(int len) {
+  int arr[len];
+}
+
+// CIR: cir.func{{.*}} @f0(%[[LEN_ARG:.*]]: !s32i {{.*}})
+// CIR: %[[LEN_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["len", init]
+// CIR: %[[SAVED_STACK:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
+// CIR: cir.store{{.*}} %[[LEN_ARG]], %[[LEN_ADDR]]
+// CIR: %[[LEN:.*]] = cir.load{{.*}} %[[LEN_ADDR]]
+// CIR: %[[LEN_SIZE_T:.*]] = cir.cast integral %[[LEN]] : !s32i -> !u64i
+// CIR: %[[STACK_PTR:.*]] = cir.stacksave
+// CIR: cir.store{{.*}} %[[STACK_PTR]], %[[SAVED_STACK]]
+// CIR: %[[ARR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[LEN_SIZE_T]] : !u64i, ["arr"]
+// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
+// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
+
+// LLVM: define{{.*}} void @f0(i32 %[[LEN_ARG:.*]]) {
+// LLVM: %[[LEN_ADDR:.*]] = alloca i32
+// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
+// LLVM: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// LLVM: %[[LEN:.*]] = load i32, ptr %[[LEN_ADDR]]
+// LLVM: %[[LEN_SIZE_T:.*]] = sext i32 %[[LEN]] to i64
+// LLVM: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// LLVM: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// LLVM: %[[ARR:.*]] = alloca i32, i64 %[[LEN_SIZE_T]]
+// LLVM: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// LLVM: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+
+// Note: VLA_EXPR0 below is emitted to capture debug info.
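+
+// Note: for the signed bound above, the ClangIR pipeline emits the
+// !s32i -> !u64i size cast as a sign extension, while classic codegen
+// zero-extends; a negative bound is undefined behavior either way, as
+// noted in emitVariablyModifiedType.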
+
+// OGCG: define{{.*}} void @f0(i32 {{.*}} %[[LEN_ARG:.*]])
+// OGCG: %[[LEN_ADDR:.*]] = alloca i32
+// OGCG: %[[SAVED_STACK:.*]] = alloca ptr
+// OGCG: %[[VLA_EXPR0:.*]] = alloca i64
+// OGCG: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// OGCG: %[[LEN:.*]] = load i32, ptr %[[LEN_ADDR]]
+// OGCG: %[[LEN_SIZE_T:.*]] = zext i32 %[[LEN]] to i64
+// OGCG: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// OGCG: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// OGCG: %[[ARR:.*]] = alloca i32, i64 %[[LEN_SIZE_T]]
+// OGCG: store i64 %[[LEN_SIZE_T]], ptr %[[VLA_EXPR0]]
+// OGCG: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// OGCG: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+
+void f1(int len) {
+  int arr[16][len];
+}
+
+// CIR: cir.func{{.*}} @f1(%[[LEN_ARG:.*]]: !s32i {{.*}})
+// CIR: %[[LEN_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["len", init]
+// CIR: %[[SAVED_STACK:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
+// CIR: cir.store{{.*}} %[[LEN_ARG]], %[[LEN_ADDR]]
+// CIR: %[[SIXTEEN:.*]] = cir.const #cir.int<16> : !s32i
+// CIR: %[[SIXTEEN_SIZE_T:.*]] = cir.cast integral %[[SIXTEEN]] : !s32i -> !u64i
+// CIR: %[[LEN:.*]] = cir.load{{.*}} %[[LEN_ADDR]]
+// CIR: %[[LEN_SIZE_T:.*]] = cir.cast integral %[[LEN]] : !s32i -> !u64i
+// CIR: %[[STACK_PTR:.*]] = cir.stacksave
+// CIR: cir.store{{.*}} %[[STACK_PTR]], %[[SAVED_STACK]]
+// CIR: %[[TOTAL_LEN:.*]] = cir.binop(mul, %[[SIXTEEN_SIZE_T]], %[[LEN_SIZE_T]]) nuw
+// CIR: %[[ARR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[TOTAL_LEN]] : !u64i, ["arr"]
+// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
+// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
+
+// LLVM: define{{.*}} void @f1(i32 %[[LEN_ARG:.*]]) {
+// LLVM: %[[LEN_ADDR:.*]] = alloca i32
+// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
+// LLVM: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// LLVM: %[[LEN:.*]] = load i32, ptr %[[LEN_ADDR]]
+// LLVM: %[[LEN_SIZE_T:.*]] = sext i32 %[[LEN]] to i64
+// LLVM: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// LLVM: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// LLVM: %[[TOTAL_LEN:.*]] = mul nuw i64 16, %[[LEN_SIZE_T]]
+// LLVM: %[[ARR:.*]] = alloca i32, i64 %[[TOTAL_LEN]]
+// LLVM: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// LLVM: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+
+// Note: VLA_EXPR0 below is emitted to capture debug info.
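+
+// Note: for the multidimensional VLA above, getVLASize folds every
+// dimension into a single element count (16 * len here, marked nuw
+// because overflow would be undefined behavior), and the alloca is
+// sized in units of the innermost element type.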
+
+// OGCG: define{{.*}} void @f1(i32 {{.*}} %[[LEN_ARG:.*]])
+// OGCG: %[[LEN_ADDR:.*]] = alloca i32
+// OGCG: %[[SAVED_STACK:.*]] = alloca ptr
+// OGCG: %[[VLA_EXPR0:.*]] = alloca i64
+// OGCG: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// OGCG: %[[LEN:.*]] = load i32, ptr %[[LEN_ADDR]]
+// OGCG: %[[LEN_SIZE_T:.*]] = zext i32 %[[LEN]] to i64
+// OGCG: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// OGCG: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// OGCG: %[[TOTAL_LEN:.*]] = mul nuw i64 16, %[[LEN_SIZE_T]]
+// OGCG: %[[ARR:.*]] = alloca i32, i64 %[[TOTAL_LEN]]
+// OGCG: store i64 %[[LEN_SIZE_T]], ptr %[[VLA_EXPR0]]
+// OGCG: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// OGCG: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+
+void f2(int len) {
+  int arr[len + 4];
+}
+
+// CIR: cir.func{{.*}} @f2(%[[LEN_ARG:.*]]: !s32i {{.*}})
+// CIR: %[[LEN_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["len", init]
+// CIR: %[[SAVED_STACK:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
+// CIR: cir.store{{.*}} %[[LEN_ARG]], %[[LEN_ADDR]]
+// CIR: %[[LEN:.*]] = cir.load{{.*}} %[[LEN_ADDR]]
+// CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[TOTAL_LEN:.*]] = cir.binop(add, %[[LEN]], %[[FOUR]]) nsw : !s32i
+// CIR: %[[TOTAL_LEN_SIZE_T:.*]] = cir.cast integral %[[TOTAL_LEN]] : !s32i -> !u64i
+// CIR: %[[STACK_PTR:.*]] = cir.stacksave
+// CIR: cir.store{{.*}} %[[STACK_PTR]], %[[SAVED_STACK]]
+// CIR: %[[ARR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[TOTAL_LEN_SIZE_T]] : !u64i, ["arr"]
+// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
+// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
+
+// LLVM: define{{.*}} void @f2(i32 %[[LEN_ARG:.*]]) {
+// LLVM: %[[LEN_ADDR:.*]] = alloca i32
+// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
+// LLVM: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// LLVM: %[[LEN:.*]] = load i32, ptr %[[LEN_ADDR]]
+// LLVM: %[[TOTAL_LEN:.*]] = add nsw i32 %[[LEN]], 4
+// LLVM: %[[TOTAL_LEN_SIZE_T:.*]] = sext i32 %[[TOTAL_LEN]] to i64
+// LLVM: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// LLVM: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// LLVM: %[[ARR:.*]] = alloca i32, i64 %[[TOTAL_LEN_SIZE_T]]
+// LLVM: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// LLVM: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+
+// Note: VLA_EXPR0 below is emitted to capture debug info.
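+
+// Note: the bound expression (len + 4) is evaluated exactly once, when
+// emitVariablyModifiedType records it in vlaSizeMap; the stack save and
+// the sized alloca are emitted after that.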
+
+// OGCG: define{{.*}} void @f2(i32 {{.*}} %[[LEN_ARG:.*]])
+// OGCG: %[[LEN_ADDR:.*]] = alloca i32
+// OGCG: %[[SAVED_STACK:.*]] = alloca ptr
+// OGCG: %[[VLA_EXPR0:.*]] = alloca i64
+// OGCG: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// OGCG: %[[LEN:.*]] = load i32, ptr %[[LEN_ADDR]]
+// OGCG: %[[TOTAL_LEN:.*]] = add nsw i32 %[[LEN]], 4
+// OGCG: %[[TOTAL_LEN_SIZE_T:.*]] = zext i32 %[[TOTAL_LEN]] to i64
+// OGCG: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// OGCG: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// OGCG: %[[ARR:.*]] = alloca i32, i64 %[[TOTAL_LEN_SIZE_T]]
+// OGCG: store i64 %[[TOTAL_LEN_SIZE_T]], ptr %[[VLA_EXPR0]]
+// OGCG: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// OGCG: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+
+void f3(unsigned len) {
+  char s1[len];
+  unsigned i = 0u;
+  while (++i < len) {
+    char s2[i];
+  }
+}
+
+// CIR: cir.func{{.*}} @f3(%[[LEN_ARG:.*]]: !u32i {{.*}})
+// CIR: %[[LEN_ADDR:.*]] = cir.alloca !u32i, !cir.ptr<!u32i>, ["len", init]
+// CIR: %[[SAVED_STACK:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
+// CIR: cir.store{{.*}} %[[LEN_ARG]], %[[LEN_ADDR]]
+// CIR: %[[LEN:.*]] = cir.load{{.*}} %[[LEN_ADDR]]
+// CIR: %[[LEN_SIZE_T:.*]] = cir.cast integral %[[LEN]] : !u32i -> !u64i
+// CIR: %[[STACK_PTR:.*]] = cir.stacksave
+// CIR: cir.store{{.*}} %[[STACK_PTR]], %[[SAVED_STACK]]
+// CIR: %[[S1:.*]] = cir.alloca !s8i, !cir.ptr<!s8i>, %[[LEN_SIZE_T]] : !u64i, ["s1"]
+// CIR: %[[I:.*]] = cir.alloca !u32i, !cir.ptr<!u32i>, ["i", init]
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
+// CIR: cir.store{{.*}} %[[ZERO]], %[[I]]
+// CIR: cir.scope {
+// CIR: cir.while {
+// CIR: %[[CUR_I:.*]] = cir.load{{.*}} %[[I]]
+// CIR: %[[NEXT:.*]] = cir.unary(inc, %[[CUR_I]])
+// CIR: cir.store{{.*}} %[[NEXT]], %[[I]]
+// CIR: %[[LEN2:.*]] = cir.load{{.*}} %[[LEN_ADDR]]
+// CIR: %[[CMP:.*]] = cir.cmp(lt, %[[NEXT]], %[[LEN2]])
+// CIR: cir.condition(%[[CMP]])
+// CIR: } do {
+// CIR: cir.scope {
+// CIR: %[[SAVED_STACK2:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
+// CIR: %[[I_LEN:.*]] = cir.load{{.*}} %[[I]]
+// CIR: %[[I_LEN_SIZE_T2:.*]] = cir.cast integral %[[I_LEN]] : !u32i -> !u64i
+// CIR: %[[STACK_PTR2:.*]] = cir.stacksave
+// CIR: cir.store{{.*}} %[[STACK_PTR2]], %[[SAVED_STACK2]]
+// CIR: %[[S2:.*]] = cir.alloca !s8i, !cir.ptr<!s8i>, %[[I_LEN_SIZE_T2]] : !u64i, ["s2"]
+// CIR: %[[SAVED_RESTORE_PTR2:.*]] = cir.load{{.*}} %[[SAVED_STACK2]]
+// CIR: cir.stackrestore %[[SAVED_RESTORE_PTR2]]
+// CIR: }
+// CIR: cir.yield
+// CIR: }
+// CIR: }
+// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
+// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
+
+// LLVM: define{{.*}} void @f3(i32 %[[LEN_ARG:.*]]) {
+// LLVM: %[[SAVED_STACK2:.*]] = alloca ptr
+// LLVM: %[[LEN_ADDR:.*]] = alloca i32
+// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
+// LLVM: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// LLVM: %[[LEN:.*]] = load i32, ptr %[[LEN_ADDR]]
+// LLVM: %[[LEN_SIZE_T:.*]] = zext i32 %[[LEN]] to i64
+// LLVM: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// LLVM: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// LLVM: %[[S1:.*]] = alloca i8, i64 %[[LEN_SIZE_T]]
+// LLVM: %[[I:.*]] = alloca i32
+// LLVM: store i32 0, ptr %[[I]]
+// LLVM: br label %[[WHILE_START:.*]]
+// LLVM: [[WHILE_START]]:
+// LLVM: br label %[[WHILE_COND:.*]]
+// LLVM: [[WHILE_COND]]:
+// LLVM: %[[CUR_I:.*]] = load i32, ptr %[[I]]
+// LLVM: %[[NEXT:.*]] = add i32 %[[CUR_I]], 1
+// LLVM: store i32 %[[NEXT]], ptr %[[I]]
+// LLVM: %[[LEN2:.*]] = load i32, ptr %[[LEN_ADDR]]
+// LLVM: %[[CMP:.*]] = icmp ult i32 %[[NEXT]], %[[LEN2]]
+// LLVM: br i1 %[[CMP]], label %[[WHILE_BODY:.*]], label %[[WHILE_END:.*]]
+// LLVM: [[WHILE_BODY]]:
+// LLVM: br label %[[WHILE_BODY2:.*]]
+// LLVM: [[WHILE_BODY2]]:
+// LLVM: %[[I_LEN:.*]] = load i32, ptr %[[I]]
+// LLVM: %[[I_LEN_SIZE_T2:.*]] = zext i32 %[[I_LEN]] to i64
+// LLVM: %[[STACK_PTR2:.*]] = call ptr @llvm.stacksave.p0()
+// LLVM: store ptr %[[STACK_PTR2]], ptr %[[SAVED_STACK2]]
+// LLVM: %[[S2:.*]] = alloca i8, i64 %[[I_LEN_SIZE_T2]]
+// LLVM: %[[STACK_RESTORE_PTR2:.*]] = load ptr, ptr %[[SAVED_STACK2]]
+// LLVM: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR2]])
+// LLVM: br label %[[WHILE_BODY_END:.*]]
+// LLVM: [[WHILE_BODY_END]]:
+// LLVM: br label %[[WHILE_COND]]
+// LLVM: [[WHILE_END]]:
+// LLVM: br label %[[F3_END:.*]]
+// LLVM: [[F3_END]]:
+// LLVM: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// LLVM: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+
+// Note: VLA_EXPR0 and VLA_EXPR1 below are emitted to capture debug info.
+
+// OGCG: define{{.*}} void @f3(i32 {{.*}} %[[LEN_ARG:.*]])
+// OGCG: %[[LEN_ADDR:.*]] = alloca i32
+// OGCG: %[[SAVED_STACK:.*]] = alloca ptr
+// OGCG: %[[VLA_EXPR0:.*]] = alloca i64
+// OGCG: %[[I:.*]] = alloca i32
+// OGCG: %[[SAVED_STACK1:.*]] = alloca ptr
+// OGCG: %[[VLA_EXPR1:.*]] = alloca i64
+// OGCG: store i32 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// OGCG: %[[LEN:.*]] = load i32, ptr %[[LEN_ADDR]]
+// OGCG: %[[LEN_SIZE_T:.*]] = zext i32 %[[LEN]] to i64
+// OGCG: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// OGCG: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// OGCG: %[[S1:.*]] = alloca i8, i64 %[[LEN_SIZE_T]]
+// OGCG: store i64 %[[LEN_SIZE_T]], ptr %[[VLA_EXPR0]]
+// OGCG: br label %[[WHILE_COND:.*]]
+// OGCG: [[WHILE_COND]]:
+// OGCG: %[[CUR_I:.*]] = load i32, ptr %[[I]]
+// OGCG: %[[NEXT:.*]] = add i32 %[[CUR_I]], 1
+// OGCG: store i32 %[[NEXT]], ptr %[[I]]
+// OGCG: %[[LEN2:.*]] = load i32, ptr %[[LEN_ADDR]]
+// OGCG: %[[CMP:.*]] = icmp ult i32 %[[NEXT]], %[[LEN2]]
+// OGCG: br i1 %[[CMP]], label %[[WHILE_BODY:.*]], label %[[WHILE_END:.*]]
+// OGCG: [[WHILE_BODY]]:
+// OGCG: %[[I_LEN:.*]] = load i32, ptr %[[I]]
+// OGCG: %[[I_LEN_SIZE_T:.*]] = zext i32 %[[I_LEN]] to i64
+// OGCG: %[[STACK_PTR1:.*]] = call ptr @llvm.stacksave.p0()
+// OGCG: store ptr %[[STACK_PTR1]], ptr %[[SAVED_STACK1]]
+// OGCG: %[[S2:.*]] = alloca i8, i64 %[[I_LEN_SIZE_T]]
+// OGCG: store i64 %[[I_LEN_SIZE_T]], ptr %[[VLA_EXPR1]]
+// OGCG: %[[STACK_RESTORE_PTR1:.*]] = load ptr, ptr %[[SAVED_STACK1]]
+// OGCG: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR1]])
+// OGCG: br label %[[WHILE_COND]]
+// OGCG: [[WHILE_END]]:
+// OGCG: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// OGCG: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+
+
+// The following test case is disabled because it runs into a bug (unrelated
+// to VLA) in the handling of cleanups in loops with break statements.
+//
+// void f4(unsigned len) {
+//   char s1[len];
+//   while (1) {
+//     char s2[len];
+//     if (1)
+//       break;
+//   }
+// }
+
\ No newline at end of file