From 229308d2c5902fd5567a0322dd66f7ada2f4f650 Mon Sep 17 00:00:00 2001 From: Jakub Kuderski Date: Wed, 22 Oct 2025 12:04:52 -0400 Subject: [PATCH] [flang][mlir] Migrate to free create functions. NFC. --- flang/lib/Lower/Bridge.cpp | 2 +- flang/lib/Lower/OpenMP/OpenMP.cpp | 55 ++++---- flang/lib/Optimizer/CodeGen/CodeGen.cpp | 2 +- flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp | 7 +- .../Transforms/ACCRecipeBufferization.cpp | 6 +- .../Optimizer/OpenMP/AutomapToTargetData.cpp | 4 +- .../OpenMP/DoConcurrentConversion.cpp | 22 ++-- .../Optimizer/OpenMP/LowerWorkdistribute.cpp | 119 +++++++++--------- .../Optimizer/OpenMP/MapInfoFinalization.cpp | 37 +++--- flang/lib/Optimizer/Support/Utils.cpp | 6 +- flang/lib/Utils/OpenMP.cpp | 2 +- 11 files changed, 132 insertions(+), 130 deletions(-) diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index acb8e114c167b..a516a44204cac 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -1766,7 +1766,7 @@ class FirConverter : public Fortran::lower::AbstractConverter { // to a crash due to a block with no terminator. See issue #126452. mlir::FunctionType funcType = builder->getFunction().getFunctionType(); mlir::Type resultType = funcType.getResult(0); - mlir::Value undefResult = builder->create(loc, resultType); + mlir::Value undefResult = fir::UndefOp::create(*builder, loc, resultType); genExitRoutine(false, undefResult); return; } diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp index a49961cc233c6..71067283d13f7 100644 --- a/flang/lib/Lower/OpenMP/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP/OpenMP.cpp @@ -2059,37 +2059,38 @@ static void genCanonicalLoopNest( // Start lowering mlir::Value zero = firOpBuilder.createIntegerConstant(loc, loopVarType, 0); mlir::Value one = firOpBuilder.createIntegerConstant(loc, loopVarType, 1); - mlir::Value isDownwards = firOpBuilder.create( - loc, mlir::arith::CmpIPredicate::slt, loopStepVar, zero); + mlir::Value isDownwards = mlir::arith::CmpIOp::create( + firOpBuilder, loc, mlir::arith::CmpIPredicate::slt, loopStepVar, zero); // Ensure we are counting upwards. If not, negate step and swap lb and ub. mlir::Value negStep = - firOpBuilder.create(loc, zero, loopStepVar); - mlir::Value incr = firOpBuilder.create( - loc, isDownwards, negStep, loopStepVar); - mlir::Value lb = firOpBuilder.create( - loc, isDownwards, loopUBVar, loopLBVar); - mlir::Value ub = firOpBuilder.create( - loc, isDownwards, loopLBVar, loopUBVar); + mlir::arith::SubIOp::create(firOpBuilder, loc, zero, loopStepVar); + mlir::Value incr = mlir::arith::SelectOp::create( + firOpBuilder, loc, isDownwards, negStep, loopStepVar); + mlir::Value lb = mlir::arith::SelectOp::create( + firOpBuilder, loc, isDownwards, loopUBVar, loopLBVar); + mlir::Value ub = mlir::arith::SelectOp::create( + firOpBuilder, loc, isDownwards, loopLBVar, loopUBVar); // Compute the trip count assuming lb <= ub. This guarantees that the result // is non-negative and we can use unsigned arithmetic. 
- mlir::Value span = firOpBuilder.create( - loc, ub, lb, ::mlir::arith::IntegerOverflowFlags::nuw); + mlir::Value span = mlir::arith::SubIOp::create( + firOpBuilder, loc, ub, lb, ::mlir::arith::IntegerOverflowFlags::nuw); mlir::Value tcMinusOne = - firOpBuilder.create(loc, span, incr); - mlir::Value tcIfLooping = firOpBuilder.create( - loc, tcMinusOne, one, ::mlir::arith::IntegerOverflowFlags::nuw); + mlir::arith::DivUIOp::create(firOpBuilder, loc, span, incr); + mlir::Value tcIfLooping = + mlir::arith::AddIOp::create(firOpBuilder, loc, tcMinusOne, one, + ::mlir::arith::IntegerOverflowFlags::nuw); // Fall back to 0 if lb > ub - mlir::Value isZeroTC = firOpBuilder.create( - loc, mlir::arith::CmpIPredicate::slt, ub, lb); - mlir::Value tripcount = firOpBuilder.create( - loc, isZeroTC, zero, tcIfLooping); + mlir::Value isZeroTC = mlir::arith::CmpIOp::create( + firOpBuilder, loc, mlir::arith::CmpIPredicate::slt, ub, lb); + mlir::Value tripcount = mlir::arith::SelectOp::create( + firOpBuilder, loc, isZeroTC, zero, tcIfLooping); tripcounts.push_back(tripcount); // Create the CLI handle. - auto newcli = firOpBuilder.create(loc); + auto newcli = mlir::omp::NewCliOp::create(firOpBuilder, loc); mlir::Value cli = newcli.getResult(); clis.push_back(cli); @@ -2122,10 +2123,10 @@ static void genCanonicalLoopNest( "Expecting all block args to have been collected by now"); for (auto j : llvm::seq(numLoops)) { mlir::Value natIterNum = fir::getBase(blockArgs[j]); - mlir::Value scaled = firOpBuilder.create( - loc, natIterNum, loopStepVars[j]); - mlir::Value userVal = firOpBuilder.create( - loc, loopLBVars[j], scaled); + mlir::Value scaled = mlir::arith::MulIOp::create( + firOpBuilder, loc, natIterNum, loopStepVars[j]); + mlir::Value userVal = mlir::arith::AddIOp::create( + firOpBuilder, loc, loopLBVars[j], scaled); mlir::OpBuilder::InsertPoint insPt = firOpBuilder.saveInsertionPoint(); @@ -2198,9 +2199,9 @@ static void genTileOp(Fortran::lower::AbstractConverter &converter, gridGeneratees.reserve(numLoops); intratileGeneratees.reserve(numLoops); for ([[maybe_unused]] auto i : llvm::seq(0, sizesClause.sizes.size())) { - auto gridCLI = firOpBuilder.create(loc); + auto gridCLI = mlir::omp::NewCliOp::create(firOpBuilder, loc); gridGeneratees.push_back(gridCLI.getResult()); - auto intratileCLI = firOpBuilder.create(loc); + auto intratileCLI = mlir::omp::NewCliOp::create(firOpBuilder, loc); intratileGeneratees.push_back(intratileCLI.getResult()); } @@ -2209,8 +2210,8 @@ static void genTileOp(Fortran::lower::AbstractConverter &converter, generatees.append(gridGeneratees); generatees.append(intratileGeneratees); - firOpBuilder.create(loc, generatees, applyees, - sizesClause.sizes); + mlir::omp::TileOp::create(firOpBuilder, loc, generatees, applyees, + sizesClause.sizes); } static void genUnrollOp(Fortran::lower::AbstractConverter &converter, diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp index e71f4e3cee49c..478ab151b96d0 100644 --- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp +++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp @@ -1151,7 +1151,7 @@ struct AllocMemOpConversion : public fir::FIROpConversion { mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, llvmObjectTy); if (auto scaleSize = fir::genAllocationScaleSize(loc, heap.getInType(), ity, rewriter)) - size = rewriter.create(loc, ity, size, scaleSize); + size = mlir::LLVM::MulOp::create(rewriter, loc, ity, size, scaleSize); for (mlir::Value opnd : adaptor.getOperands()) size = mlir::LLVM::MulOp::create(rewriter, 
loc, ity, size, integerCast(loc, rewriter, ity, opnd)); diff --git a/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp b/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp index 381b2a29c517a..f74d635d50a75 100644 --- a/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp +++ b/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp @@ -242,10 +242,11 @@ struct TargetAllocMemOpConversion loc, llvmObjectTy, ity, rewriter, lowerTy().getDataLayout()); if (auto scaleSize = fir::genAllocationScaleSize( loc, allocmemOp.getInType(), ity, rewriter)) - size = rewriter.create(loc, ity, size, scaleSize); + size = mlir::LLVM::MulOp::create(rewriter, loc, ity, size, scaleSize); for (mlir::Value opnd : adaptor.getOperands().drop_front()) - size = rewriter.create( - loc, ity, size, integerCast(lowerTy(), loc, rewriter, ity, opnd)); + size = mlir::LLVM::MulOp::create( + rewriter, loc, ity, size, + integerCast(lowerTy(), loc, rewriter, ity, opnd)); auto mallocTyWidth = lowerTy().getIndexTypeBitwidth(); auto mallocTy = mlir::IntegerType::get(rewriter.getContext(), mallocTyWidth); diff --git a/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp b/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp index 4840a999ecd27..0d135a94588e4 100644 --- a/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp +++ b/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp @@ -39,13 +39,13 @@ class BufferizeInterface { static mlir::Operation *load(mlir::OpBuilder &builder, mlir::Location loc, mlir::Value value) { - return builder.create(loc, value); + return fir::LoadOp::create(builder, loc, value); } static mlir::Value placeInMemory(mlir::OpBuilder &builder, mlir::Location loc, mlir::Value value) { - auto alloca = builder.create(loc, value.getType()); - builder.create(loc, value, alloca); + auto alloca = fir::AllocaOp::create(builder, loc, value.getType()); + fir::StoreOp::create(builder, loc, value, alloca); return alloca; } }; diff --git a/flang/lib/Optimizer/OpenMP/AutomapToTargetData.cpp b/flang/lib/Optimizer/OpenMP/AutomapToTargetData.cpp index 817434ff3dc30..5793d46a192a7 100644 --- a/flang/lib/Optimizer/OpenMP/AutomapToTargetData.cpp +++ b/flang/lib/Optimizer/OpenMP/AutomapToTargetData.cpp @@ -130,8 +130,8 @@ class AutomapToTargetDataPass builder.getBoolAttr(false)); clauses.mapVars.push_back(mapInfo); isa(memOp) - ? builder.create(memOp.getLoc(), clauses) - : builder.create(memOp.getLoc(), clauses); + ? omp::TargetEnterDataOp::create(builder, memOp.getLoc(), clauses) + : omp::TargetExitDataOp::create(builder, memOp.getLoc(), clauses); }; for (fir::GlobalOp globalOp : automapGlobals) { diff --git a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp index 65a23be243716..1229018bd9b3e 100644 --- a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp +++ b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp @@ -595,7 +595,7 @@ class DoConcurrentConversion mlir::omp::TargetOperands &clauseOps, mlir::omp::LoopNestOperands &loopNestClauseOps, const LiveInShapeInfoMap &liveInShapeInfoMap) const { - auto targetOp = rewriter.create(loc, clauseOps); + auto targetOp = mlir::omp::TargetOp::create(rewriter, loc, clauseOps); auto argIface = llvm::cast(*targetOp); mlir::Region ®ion = targetOp.getRegion(); @@ -672,7 +672,7 @@ class DoConcurrentConversion // temporary. 
Fortran::utils::openmp::cloneOrMapRegionOutsiders(builder, targetOp); rewriter.setInsertionPoint( - rewriter.create(targetOp.getLoc())); + mlir::omp::TerminatorOp::create(rewriter, targetOp.getLoc())); return targetOp; } @@ -715,8 +715,8 @@ class DoConcurrentConversion auto shapeShiftType = fir::ShapeShiftType::get( builder.getContext(), shapeShiftOperands.size() / 2); - return builder.create( - liveInArg.getLoc(), shapeShiftType, shapeShiftOperands); + return fir::ShapeShiftOp::create(builder, liveInArg.getLoc(), + shapeShiftType, shapeShiftOperands); } llvm::SmallVector shapeOperands; @@ -728,11 +728,11 @@ class DoConcurrentConversion ++shapeIdx; } - return builder.create(liveInArg.getLoc(), shapeOperands); + return fir::ShapeOp::create(builder, liveInArg.getLoc(), shapeOperands); }(); - return builder.create(liveInArg.getLoc(), liveInArg, - liveInName, shape); + return hlfir::DeclareOp::create(builder, liveInArg.getLoc(), liveInArg, + liveInName, shape); } mlir::omp::TeamsOp genTeamsOp(mlir::ConversionPatternRewriter &rewriter, @@ -742,13 +742,13 @@ class DoConcurrentConversion genReductions(rewriter, mapper, loop, teamsOps); mlir::Location loc = loop.getLoc(); - auto teamsOp = rewriter.create(loc, teamsOps); + auto teamsOp = mlir::omp::TeamsOp::create(rewriter, loc, teamsOps); Fortran::common::openmp::EntryBlockArgs teamsArgs; teamsArgs.reduction.vars = teamsOps.reductionVars; Fortran::common::openmp::genEntryBlock(rewriter, teamsArgs, teamsOp.getRegion()); - rewriter.setInsertionPoint(rewriter.create(loc)); + rewriter.setInsertionPoint(mlir::omp::TerminatorOp::create(rewriter, loc)); for (auto [loopVar, teamsArg] : llvm::zip_equal( loop.getReduceVars(), teamsOp.getRegion().getArguments())) { @@ -761,8 +761,8 @@ class DoConcurrentConversion mlir::omp::DistributeOp genDistributeOp(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter) const { - auto distOp = rewriter.create( - loc, /*clauses=*/mlir::omp::DistributeOperands{}); + auto distOp = mlir::omp::DistributeOp::create( + rewriter, loc, /*clauses=*/mlir::omp::DistributeOperands{}); rewriter.createBlock(&distOp.getRegion()); return distOp; diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp index 8a9b383ec1356..7b61539984232 100644 --- a/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp +++ b/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp @@ -282,14 +282,14 @@ fissionWorkdistribute(omp::WorkdistributeOp workdistribute) { &newTeams.getRegion(), newTeams.getRegion().begin(), {}, {}); for (auto arg : teamsBlock->getArguments()) newTeamsBlock->addArgument(arg.getType(), arg.getLoc()); - auto newWorkdistribute = rewriter.create(loc); - rewriter.create(loc); + auto newWorkdistribute = omp::WorkdistributeOp::create(rewriter, loc); + omp::TerminatorOp::create(rewriter, loc); rewriter.createBlock(&newWorkdistribute.getRegion(), newWorkdistribute.getRegion().begin(), {}, {}); auto *cloned = rewriter.clone(*parallelize); parallelize->replaceAllUsesWith(cloned); parallelize->erase(); - rewriter.create(loc); + omp::TerminatorOp::create(rewriter, loc); changed = true; } } @@ -298,10 +298,10 @@ fissionWorkdistribute(omp::WorkdistributeOp workdistribute) { /// Generate omp.parallel operation with an empty region. 
static void genParallelOp(Location loc, OpBuilder &rewriter, bool composite) { - auto parallelOp = rewriter.create(loc); + auto parallelOp = mlir::omp::ParallelOp::create(rewriter, loc); parallelOp.setComposite(composite); rewriter.createBlock(¶llelOp.getRegion()); - rewriter.setInsertionPoint(rewriter.create(loc)); + rewriter.setInsertionPoint(mlir::omp::TerminatorOp::create(rewriter, loc)); return; } @@ -309,7 +309,7 @@ static void genParallelOp(Location loc, OpBuilder &rewriter, bool composite) { static void genDistributeOp(Location loc, OpBuilder &rewriter, bool composite) { mlir::omp::DistributeOperands distributeClauseOps; auto distributeOp = - rewriter.create(loc, distributeClauseOps); + mlir::omp::DistributeOp::create(rewriter, loc, distributeClauseOps); distributeOp.setComposite(composite); auto distributeBlock = rewriter.createBlock(&distributeOp.getRegion()); rewriter.setInsertionPointToStart(distributeBlock); @@ -334,12 +334,12 @@ static void genWsLoopOp(mlir::OpBuilder &rewriter, fir::DoLoopOp doLoop, const mlir::omp::LoopNestOperands &clauseOps, bool composite) { - auto wsloopOp = rewriter.create(doLoop.getLoc()); + auto wsloopOp = mlir::omp::WsloopOp::create(rewriter, doLoop.getLoc()); wsloopOp.setComposite(composite); rewriter.createBlock(&wsloopOp.getRegion()); auto loopNestOp = - rewriter.create(doLoop.getLoc(), clauseOps); + mlir::omp::LoopNestOp::create(rewriter, doLoop.getLoc(), clauseOps); // Clone the loop's body inside the loop nest construct using the // mapped values. @@ -351,7 +351,7 @@ static void genWsLoopOp(mlir::OpBuilder &rewriter, fir::DoLoopOp doLoop, // Erase fir.result op of do loop and create yield op. if (auto resultOp = dyn_cast(terminatorOp)) { rewriter.setInsertionPoint(terminatorOp); - rewriter.create(doLoop->getLoc()); + mlir::omp::YieldOp::create(rewriter, doLoop->getLoc()); terminatorOp->erase(); } } @@ -494,15 +494,15 @@ static SmallVector convertFlatToMultiDim(OpBuilder &builder, // Convert flat index to multi-dimensional indices SmallVector indices(rank); Value temp = flatIdx; - auto c1 = builder.create(loc, 1); + auto c1 = arith::ConstantIndexOp::create(builder, loc, 1); // Work backwards through dimensions (row-major order) for (int i = rank - 1; i >= 0; --i) { - Value zeroBasedIdx = builder.create(loc, temp, extents[i]); + Value zeroBasedIdx = arith::RemSIOp::create(builder, loc, temp, extents[i]); // Convert to one-based index - indices[i] = builder.create(loc, zeroBasedIdx, c1); + indices[i] = arith::AddIOp::create(builder, loc, zeroBasedIdx, c1); if (i > 0) { - temp = builder.create(loc, temp, extents[i]); + temp = arith::DivSIOp::create(builder, loc, temp, extents[i]); } } @@ -525,7 +525,7 @@ static Value CalculateTotalElements(OpBuilder &builder, Location loc, if (i == 0) { totalElems = extent; } else { - totalElems = builder.create(loc, totalElems, extent); + totalElems = arith::MulIOp::create(builder, loc, totalElems, extent); } } return totalElems; @@ -562,14 +562,14 @@ static void replaceWithUnorderedDoLoop(OpBuilder &builder, Location loc, // Load destination array box (if it's a reference) Value arrayBox = destBox; if (isa(destBox.getType())) - arrayBox = builder.create(loc, destBox); + arrayBox = fir::LoadOp::create(builder, loc, destBox); - auto scalarValue = builder.create(loc, srcBox); - Value scalar = builder.create(loc, scalarValue); + auto scalarValue = fir::BoxAddrOp::create(builder, loc, srcBox); + Value scalar = fir::LoadOp::create(builder, loc, scalarValue); // Calculate total number of elements (flattened) - auto c0 
= builder.create(loc, 0); - auto c1 = builder.create(loc, 1); + auto c0 = arith::ConstantIndexOp::create(builder, loc, 0); + auto c1 = arith::ConstantIndexOp::create(builder, loc, 1); Value totalElems = CalculateTotalElements(builder, loc, arrayBox); auto *workdistributeBlock = &workdistribute.getRegion().front(); @@ -587,7 +587,7 @@ static void replaceWithUnorderedDoLoop(OpBuilder &builder, Location loc, builder, loc, fir::ReferenceType::get(scalar.getType()), arrayBox, nullptr, nullptr, ValueRange{indices}, ValueRange{}); - builder.create(loc, scalar, elemPtr); + fir::StoreOp::create(builder, loc, scalar, elemPtr); } /// workdistributeRuntimeCallLower method finds the runtime calls @@ -749,14 +749,15 @@ FailureOr splitTargetData(omp::TargetOp targetOp, auto deviceAddrVars = targetOp.getHasDeviceAddrVars(); auto devicePtrVars = targetOp.getIsDevicePtrVars(); // Create the target data op - auto targetDataOp = rewriter.create( - loc, device, ifExpr, outerMapInfos, deviceAddrVars, devicePtrVars); + auto targetDataOp = + omp::TargetDataOp::create(rewriter, loc, device, ifExpr, outerMapInfos, + deviceAddrVars, devicePtrVars); auto taregtDataBlock = rewriter.createBlock(&targetDataOp.getRegion()); - rewriter.create(loc); + mlir::omp::TerminatorOp::create(rewriter, loc); rewriter.setInsertionPointToStart(taregtDataBlock); // Create the inner target op - auto newTargetOp = rewriter.create( - targetOp.getLoc(), targetOp.getAllocateVars(), + auto newTargetOp = omp::TargetOp::create( + rewriter, targetOp.getLoc(), targetOp.getAllocateVars(), targetOp.getAllocatorVars(), targetOp.getBareAttr(), targetOp.getDependKindsAttr(), targetOp.getDependVars(), targetOp.getDevice(), targetOp.getHasDeviceAddrVars(), @@ -821,19 +822,19 @@ static TempOmpVar allocateTempOmpVar(Location loc, Type ty, // Get the appropriate type for allocation if (isPtr(ty)) { Type intTy = rewriter.getI32Type(); - auto one = rewriter.create(loc, intTy, 1); + auto one = LLVM::ConstantOp::create(rewriter, loc, intTy, 1); allocType = llvmPtrTy; - alloc = rewriter.create(loc, llvmPtrTy, allocType, one); + alloc = LLVM::AllocaOp::create(rewriter, loc, llvmPtrTy, allocType, one); allocType = intTy; } else { allocType = ty; - alloc = rewriter.create(loc, allocType); + alloc = fir::AllocaOp::create(rewriter, loc, allocType); } // Lambda to create mapinfo ops auto getMapInfo = [&](mlir::omp::ClauseMapFlags mappingFlags, const char *name) { - return rewriter.create( - loc, alloc.getType(), alloc, TypeAttr::get(allocType), + return omp::MapInfoOp::create( + rewriter, loc, alloc.getType(), alloc, TypeAttr::get(allocType), rewriter.getAttr(mappingFlags), rewriter.getAttr( omp::VariableCaptureKind::ByRef), @@ -979,12 +980,12 @@ static void reloadCacheAndRecompute( // If the original value is a pointer or reference, load and convert if // necessary. 
if (isPtr(original.getType())) { - restored = rewriter.create(loc, llvmPtrTy, newArg); + restored = LLVM::LoadOp::create(rewriter, loc, llvmPtrTy, newArg); if (!isa(original.getType())) restored = - rewriter.create(loc, original.getType(), restored); + fir::ConvertOp::create(rewriter, loc, original.getType(), restored); } else { - restored = rewriter.create(loc, newArg); + restored = fir::LoadOp::create(rewriter, loc, newArg); } irMapping.map(original, restored); } @@ -1053,7 +1054,7 @@ static mlir::LLVM::ConstantOp genI32Constant(mlir::Location loc, mlir::RewriterBase &rewriter, int value) { mlir::Type i32Ty = rewriter.getI32Type(); mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value); - return rewriter.create(loc, i32Ty, attr); + return mlir::LLVM::ConstantOp::create(rewriter, loc, i32Ty, attr); } /// Given a box descriptor, extract the base address of the data it describes. @@ -1230,8 +1231,8 @@ static void genFortranAssignOmpReplacement(fir::FirOpBuilder &builder, genOmpGetMappedPtrIfPresent(builder, loc, destBase, device, module); Value srcPtr = genOmpGetMappedPtrIfPresent(builder, loc, srcBase, device, module); - Value zero = builder.create(loc, builder.getI64Type(), - builder.getI64IntegerAttr(0)); + Value zero = LLVM::ConstantOp::create(builder, loc, builder.getI64Type(), + builder.getI64IntegerAttr(0)); // Generate the call to omp_target_memcpy to perform the data copy on the // device. @@ -1348,23 +1349,24 @@ static LogicalResult moveToHost(omp::TargetOp targetOp, RewriterBase &rewriter, for (Operation *op : opsToReplace) { if (auto allocOp = dyn_cast(op)) { rewriter.setInsertionPoint(allocOp); - auto ompAllocmemOp = rewriter.create( - allocOp.getLoc(), rewriter.getI64Type(), device, + auto ompAllocmemOp = omp::TargetAllocMemOp::create( + rewriter, allocOp.getLoc(), rewriter.getI64Type(), device, allocOp.getInTypeAttr(), allocOp.getUniqNameAttr(), allocOp.getBindcNameAttr(), allocOp.getTypeparams(), allocOp.getShape()); - auto firConvertOp = rewriter.create( - allocOp.getLoc(), allocOp.getResult().getType(), - ompAllocmemOp.getResult()); + auto firConvertOp = fir::ConvertOp::create(rewriter, allocOp.getLoc(), + allocOp.getResult().getType(), + ompAllocmemOp.getResult()); rewriter.replaceOp(allocOp, firConvertOp.getResult()); } // Replace fir.freemem with omp.target_freemem. else if (auto freeOp = dyn_cast(op)) { rewriter.setInsertionPoint(freeOp); - auto firConvertOp = rewriter.create( - freeOp.getLoc(), rewriter.getI64Type(), freeOp.getHeapref()); - rewriter.create(freeOp.getLoc(), device, - firConvertOp.getResult()); + auto firConvertOp = + fir::ConvertOp::create(rewriter, freeOp.getLoc(), + rewriter.getI64Type(), freeOp.getHeapref()); + omp::TargetFreeMemOp::create(rewriter, freeOp.getLoc(), device, + firConvertOp.getResult()); rewriter.eraseOp(freeOp); } // fir.declare changes its type when hoisting it out of omp.target to @@ -1376,8 +1378,9 @@ static LogicalResult moveToHost(omp::TargetOp targetOp, RewriterBase &rewriter, dyn_cast(clonedInType); Type clonedEleTy = clonedRefType.getElementType(); rewriter.setInsertionPoint(op); - Value loadedValue = rewriter.create( - clonedDeclareOp.getLoc(), clonedEleTy, clonedDeclareOp.getMemref()); + Value loadedValue = + fir::LoadOp::create(rewriter, clonedDeclareOp.getLoc(), clonedEleTy, + clonedDeclareOp.getMemref()); clonedDeclareOp.getResult().replaceAllUsesWith(loadedValue); } // Replace runtime calls with omp versions. 
@@ -1473,8 +1476,8 @@ genPreTargetOp(omp::TargetOp targetOp, SmallVector &preMapOperands, auto *targetBlock = &targetOp.getRegion().front(); SmallVector preHostEvalVars{targetOp.getHostEvalVars()}; // update the hostEvalVars of preTargetOp - omp::TargetOp preTargetOp = rewriter.create( - targetOp.getLoc(), targetOp.getAllocateVars(), + omp::TargetOp preTargetOp = omp::TargetOp::create( + rewriter, targetOp.getLoc(), targetOp.getAllocateVars(), targetOp.getAllocatorVars(), targetOp.getBareAttr(), targetOp.getDependKindsAttr(), targetOp.getDependVars(), targetOp.getDevice(), targetOp.getHasDeviceAddrVars(), preHostEvalVars, @@ -1513,13 +1516,13 @@ genPreTargetOp(omp::TargetOp targetOp, SmallVector &preMapOperands, // Create the store operation. if (isPtr(originalResult.getType())) { if (!isa(toStore.getType())) - toStore = rewriter.create(loc, llvmPtrTy, toStore); - rewriter.create(loc, toStore, newArg); + toStore = fir::ConvertOp::create(rewriter, loc, llvmPtrTy, toStore); + LLVM::StoreOp::create(rewriter, loc, toStore, newArg); } else { - rewriter.create(loc, toStore, newArg); + fir::StoreOp::create(rewriter, loc, toStore, newArg); } } - rewriter.create(loc); + omp::TerminatorOp::create(rewriter, loc); // Update hostEvalVars with the mapped values for the loop bounds if we have // a loopNestOp and we are not generating code for the target device. @@ -1563,8 +1566,8 @@ genIsolatedTargetOp(omp::TargetOp targetOp, SmallVector &postMapOperands, hostEvalVars.steps.end()); } // Create the isolated target op - omp::TargetOp isolatedTargetOp = rewriter.create( - targetOp.getLoc(), targetOp.getAllocateVars(), + omp::TargetOp isolatedTargetOp = omp::TargetOp::create( + rewriter, targetOp.getLoc(), targetOp.getAllocateVars(), targetOp.getAllocatorVars(), targetOp.getBareAttr(), targetOp.getDependKindsAttr(), targetOp.getDependVars(), targetOp.getDevice(), targetOp.getHasDeviceAddrVars(), @@ -1590,7 +1593,7 @@ genIsolatedTargetOp(omp::TargetOp targetOp, SmallVector &postMapOperands, // Clone the original operations. rewriter.clone(*splitBeforeOp, isolatedMapping); - rewriter.create(loc); + omp::TerminatorOp::create(rewriter, loc); // update the loop bounds in the isolatedTargetOp if we have host_eval vars // and we are not generating code for the target device. 
@@ -1643,8 +1646,8 @@ static omp::TargetOp genPostTargetOp(omp::TargetOp targetOp, auto *targetBlock = &targetOp.getRegion().front(); SmallVector postHostEvalVars{targetOp.getHostEvalVars()}; // Create the post target op - omp::TargetOp postTargetOp = rewriter.create( - targetOp.getLoc(), targetOp.getAllocateVars(), + omp::TargetOp postTargetOp = omp::TargetOp::create( + rewriter, targetOp.getLoc(), targetOp.getAllocateVars(), targetOp.getAllocatorVars(), targetOp.getBareAttr(), targetOp.getDependKindsAttr(), targetOp.getDependVars(), targetOp.getDevice(), targetOp.getHasDeviceAddrVars(), postHostEvalVars, diff --git a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp index 566e88b9d6588..bd07d7fe01b85 100644 --- a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp +++ b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp @@ -883,18 +883,16 @@ class MapInfoFinalizationPass if (explicitMappingPresent(op, targetDataOp)) return; - mlir::omp::MapInfoOp newDescParentMapOp = - builder.create( - op->getLoc(), op.getResult().getType(), op.getVarPtr(), - op.getVarTypeAttr(), - builder.getAttr( - mlir::omp::ClauseMapFlags::to | - mlir::omp::ClauseMapFlags::always), - op.getMapCaptureTypeAttr(), /*varPtrPtr=*/mlir::Value{}, - mlir::SmallVector{}, mlir::ArrayAttr{}, - /*bounds=*/mlir::SmallVector{}, - /*mapperId*/ mlir::FlatSymbolRefAttr(), op.getNameAttr(), - /*partial_map=*/builder.getBoolAttr(false)); + mlir::omp::MapInfoOp newDescParentMapOp = mlir::omp::MapInfoOp::create( + builder, op->getLoc(), op.getResult().getType(), op.getVarPtr(), + op.getVarTypeAttr(), + builder.getAttr( + mlir::omp::ClauseMapFlags::to | mlir::omp::ClauseMapFlags::always), + op.getMapCaptureTypeAttr(), /*varPtrPtr=*/mlir::Value{}, + mlir::SmallVector{}, mlir::ArrayAttr{}, + /*bounds=*/mlir::SmallVector{}, + /*mapperId*/ mlir::FlatSymbolRefAttr(), op.getNameAttr(), + /*partial_map=*/builder.getBoolAttr(false)); targetDataOp.getMapVarsMutable().append({newDescParentMapOp}); } @@ -946,14 +944,13 @@ class MapInfoFinalizationPass // need to see how well this alteration works. 
auto loadBaseAddr = builder.loadIfRef(op->getLoc(), baseAddr.getVarPtrPtr()); - mlir::omp::MapInfoOp newBaseAddrMapOp = - builder.create( - op->getLoc(), loadBaseAddr.getType(), loadBaseAddr, - baseAddr.getVarTypeAttr(), baseAddr.getMapTypeAttr(), - baseAddr.getMapCaptureTypeAttr(), mlir::Value{}, members, - membersAttr, baseAddr.getBounds(), - /*mapperId*/ mlir::FlatSymbolRefAttr(), op.getNameAttr(), - /*partial_map=*/builder.getBoolAttr(false)); + mlir::omp::MapInfoOp newBaseAddrMapOp = mlir::omp::MapInfoOp::create( + builder, op->getLoc(), loadBaseAddr.getType(), loadBaseAddr, + baseAddr.getVarTypeAttr(), baseAddr.getMapTypeAttr(), + baseAddr.getMapCaptureTypeAttr(), mlir::Value{}, members, membersAttr, + baseAddr.getBounds(), + /*mapperId*/ mlir::FlatSymbolRefAttr(), op.getNameAttr(), + /*partial_map=*/builder.getBoolAttr(false)); op.replaceAllUsesWith(newBaseAddrMapOp.getResult()); op->erase(); baseAddr.erase(); diff --git a/flang/lib/Optimizer/Support/Utils.cpp b/flang/lib/Optimizer/Support/Utils.cpp index 92390e4a3a230..2f33d89564796 100644 --- a/flang/lib/Optimizer/Support/Utils.cpp +++ b/flang/lib/Optimizer/Support/Utils.cpp @@ -66,7 +66,7 @@ fir::genConstantIndex(mlir::Location loc, mlir::Type ity, mlir::ConversionPatternRewriter &rewriter, std::int64_t offset) { auto cattr = rewriter.getI64IntegerAttr(offset); - return rewriter.create(loc, ity, cattr); + return mlir::LLVM::ConstantOp::create(rewriter, loc, ity, cattr); } mlir::Value @@ -125,9 +125,9 @@ mlir::Value fir::integerCast(const fir::LLVMTypeConverter &converter, return rewriter.createOrFold(loc, ty, val); } else { if (toSize < fromSize) - return rewriter.create(loc, ty, val); + return mlir::LLVM::TruncOp::create(rewriter, loc, ty, val); if (toSize > fromSize) - return rewriter.create(loc, ty, val); + return mlir::LLVM::SExtOp::create(rewriter, loc, ty, val); } return val; } diff --git a/flang/lib/Utils/OpenMP.cpp b/flang/lib/Utils/OpenMP.cpp index 15a42c3f50866..c2036c4a383fd 100644 --- a/flang/lib/Utils/OpenMP.cpp +++ b/flang/lib/Utils/OpenMP.cpp @@ -112,7 +112,7 @@ mlir::Value mapTemporaryValue(fir::FirOpBuilder &firOpBuilder, mlir::Block *entryBlock = ®ion.getBlocks().front(); firOpBuilder.setInsertionPointToStart(entryBlock); auto loadOp = - firOpBuilder.create(clonedValArg.getLoc(), clonedValArg); + fir::LoadOp::create(firOpBuilder, clonedValArg.getLoc(), clonedValArg); return loadOp.getResult(); }