Skip to content

Commit c4e2706

Browse files
committed
[Flang][OpenMP] Add pass to replace allocas with device shared memory
This patch introduces a new Flang OpenMP MLIR pass, run only for target device modules, that identifies `fir.alloca` operations that should use device shared memory and replaces them with pairs of `omp.alloc_shared_mem` and `omp.free_shared_mem` operations. This works in conjunction with the MLIR to LLVM IR translation pass' handling of privatization, mapping and reductions in the OpenMP dialect to properly select the right memory space for allocations based on where they are made and where they are used. This pass, in particular, handles explicit stack allocations in MLIR, whereas the aforementioned translation pass takes care of implicit ones represented by entry block arguments.
1 parent d913966 commit c4e2706

File tree

5 files changed

+398
-1
lines changed

5 files changed

+398
-1
lines changed

flang/include/flang/Optimizer/OpenMP/Passes.td

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -128,4 +128,21 @@ def AutomapToTargetDataPass
128128
let dependentDialects = ["mlir::omp::OpenMPDialect"];
129129
}
130130

131+
// Target-device-only pass (scheduled on each function) that rewrites eligible
// `fir.alloca`s into device shared memory allocation/deallocation pairs. See
// StackToShared.cpp for the eligibility rules.
def StackToSharedPass : Pass<"omp-stack-to-shared", "mlir::func::FuncOp"> {
  let summary = "Replaces stack allocations with shared memory.";
  let description = [{
    `fir.alloca` operations defining values in a target region and then used
    inside of an `omp.parallel` region are replaced by this pass with
    `omp.alloc_shared_mem` and `omp.free_shared_mem`. This is also done for
    top-level function `fir.alloca`s used in the same way when the parent
    function is a target device function.

    This ensures that explicit private allocations, intended to be shared across
    threads, use the proper memory space on a target device while supporting the
    case of parallel regions indirectly reached from within a target region via
    function calls.
  }];
  let dependentDialects = ["mlir::omp::OpenMPDialect"];
}
147+
131148
#endif //FORTRAN_OPTIMIZER_OPENMP_PASSES

flang/lib/Optimizer/OpenMP/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ add_flang_library(FlangOpenMPTransforms
1111
LowerWorkshare.cpp
1212
LowerNontemporal.cpp
1313
SimdOnly.cpp
14+
StackToShared.cpp
1415

1516
DEPENDS
1617
FIRDialect
Lines changed: 162 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,162 @@
1+
//===- StackToShared.cpp -------------------------------------------===//
2+
//
3+
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4+
// See https://llvm.org/LICENSE.txt for license information.
5+
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6+
//
7+
//===----------------------------------------------------------------------===//
8+
//
9+
// This file implements transforms to swap stack allocations on the target
10+
// device with device shared memory where applicable.
11+
//
12+
//===----------------------------------------------------------------------===//
13+
14+
#include "flang/Optimizer/Dialect/FIROps.h"
15+
#include "flang/Optimizer/HLFIR/HLFIROps.h"
16+
#include "flang/Optimizer/OpenMP/Passes.h"
17+
#include "mlir/Dialect/Func/IR/FuncOps.h"
18+
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
19+
#include "mlir/Dialect/OpenMP/OpenMPInterfaces.h"
20+
21+
namespace flangomp {
22+
#define GEN_PASS_DEF_STACKTOSHAREDPASS
23+
#include "flang/Optimizer/OpenMP/Passes.h.inc"
24+
} // namespace flangomp
25+
26+
using namespace mlir;
27+
28+
namespace {
/// Pass that rewrites eligible `fir.alloca` operations inside target device
/// code into `omp.alloc_shared_mem` / `omp.free_shared_mem` pairs, so that the
/// allocated storage lives in device shared memory rather than on a single
/// thread's stack. Eligibility is decided by `shouldReplaceAlloca` below.
class StackToSharedPass
    : public flangomp::impl::StackToSharedPassBase<StackToSharedPass> {
public:
  StackToSharedPass() = default;

  /// Walks the current function and replaces each qualifying `fir.alloca`
  /// with a shared-memory allocation, inserting matching frees before every
  /// region-exiting terminator dominated by the allocation.
  void runOnOperation() override {
    MLIRContext *context = &getContext();
    OpBuilder builder(context);

    // Bail out unless this function belongs to a module compiled for a target
    // device; on the host, regular stack allocations are already correct.
    func::FuncOp funcOp = getOperation();
    auto offloadIface = funcOp->getParentOfType<omp::OffloadModuleInterface>();
    if (!offloadIface || !offloadIface.getIsTargetDevice())
      return;

    funcOp->walk([&](fir::AllocaOp allocaOp) {
      if (!shouldReplaceAlloca(*allocaOp))
        return;

      // Replace fir.alloca with omp.alloc_shared_mem, forwarding the alloca's
      // type, naming attributes, type parameters and shape unchanged.
      builder.setInsertionPoint(allocaOp);
      auto sharedAllocOp = omp::AllocSharedMemOp::create(
          builder, allocaOp->getLoc(), allocaOp.getResult().getType(),
          allocaOp.getInType(), allocaOp.getUniqNameAttr(),
          allocaOp.getBindcNameAttr(), allocaOp.getTypeparams(),
          allocaOp.getShape());
      allocaOp.replaceAllUsesWith(sharedAllocOp.getOperation());
      // NOTE(review): the alloca is erased while it is the op being visited by
      // this walk; this is fine for a region-less op in a post-order walk, but
      // worth confirming against the walker's documented erasure rules.
      allocaOp.erase();

      // Create a new omp.free_shared_mem for the allocated buffer prior to
      // exiting the region. A terminator with no successor blocks is treated
      // as a region exit, and only blocks dominated by the allocation block
      // are considered so the free always sees a valid allocation.
      // NOTE(review): if no exiting block is dominated by the allocation
      // block, no free is emitted for this allocation — confirm this cannot
      // leak device shared memory in practice.
      Block *allocaBlock = sharedAllocOp->getBlock();
      DominanceInfo domInfo;
      for (Block &block : sharedAllocOp->getParentRegion()->getBlocks()) {
        Operation *terminator = block.getTerminator();
        if (!terminator->hasSuccessors() &&
            domInfo.dominates(allocaBlock, &block)) {
          builder.setInsertionPoint(terminator);
          omp::FreeSharedMemOp::create(builder, sharedAllocOp.getLoc(),
                                       sharedAllocOp);
        }
      }
    });
  }

private:
  // TODO: Refactor the logic in `shouldReplaceAlloca` and `checkAllocaUses` to
  // be reusable by the MLIR to LLVM IR translation stage, as something very
  // similar is also implemented there to choose between allocas and device
  // shared memory allocations when processing OpenMP reductions, mapping and
  // privatization.

  // Decide whether to replace a fir.alloca with a pair of device shared memory
  // allocation/deallocation pair based on the location of the allocation and
  // its uses.
  //
  // In summary, it should be done whenever the allocation is placed outside any
  // parallel regions and inside either a target device function or a generic
  // kernel, while being used inside of a parallel region.
  bool shouldReplaceAlloca(Operation &op) {
    auto targetOp = op.getParentOfType<omp::TargetOp>();

    // It must be inside of a generic omp.target or in a target device function,
    // and not inside of omp.parallel.
    if (auto parallelOp = op.getParentOfType<omp::ParallelOp>()) {
      // An enclosing parallel region is only acceptable when it sits outside
      // the target region (i.e. the target is nested within the parallel).
      if (!targetOp || !targetOp->isProperAncestor(parallelOp))
        return false;
    }

    if (targetOp) {
      // Inside a target region: only generic-mode kernels qualify.
      if (targetOp.getKernelExecFlags(targetOp.getInnermostCapturedOmpOp()) !=
          mlir::omp::TargetExecMode::generic)
        return false;
    } else {
      // Outside any target region: the parent function itself must be a
      // declare-target function compiled for a non-host device.
      auto declTargetIface = dyn_cast<mlir::omp::DeclareTargetInterface>(
          *op.getParentOfType<func::FuncOp>());
      if (!declTargetIface || !declTargetIface.isDeclareTarget() ||
          declTargetIface.getDeclareTargetDeviceType() ==
              mlir::omp::DeclareTargetDeviceType::host)
        return false;
    }

    // Location checks passed; the final requirement is an eligible use.
    return checkAllocaUses(op.getUses());
  }

  // When a use takes place inside an omp.parallel region and it's not as a
  // private clause argument, or when it is a reduction argument passed to
  // omp.parallel, then the defining allocation is eligible for replacement with
  // shared memory.
  //
  // Only one of the uses needs to meet these conditions to return true.
  bool checkAllocaUses(const Operation::use_range &uses) {
    // Returns true if this single use requires the allocation to be shared.
    auto checkUse = [&](const OpOperand &use) {
      Operation *owner = use.getOwner();
      auto moduleOp = owner->getParentOfType<ModuleOp>();
      if (auto parallelOp = dyn_cast<omp::ParallelOp>(owner)) {
        // A reduction variable passed to omp.parallel must be shared.
        if (llvm::is_contained(parallelOp.getReductionVars(), use.get()))
          return true;
      } else if (owner->getParentOfType<omp::ParallelOp>()) {
        // If it is used directly inside of a parallel region, it has to be
        // replaced unless the use is a private clause.
        if (auto argIface = dyn_cast<omp::BlockArgOpenMPOpInterface>(owner)) {
          if (auto privateSyms = llvm::cast_or_null<ArrayAttr>(
                  owner->getAttr("private_syms"))) {
            // Pair each private variable with its privatizer symbol and find
            // the entry corresponding to this use, if any.
            for (auto [var, sym] :
                 llvm::zip_equal(argIface.getPrivateVars(), privateSyms)) {
              if (var != use.get())
                continue;

              // Presumably a non-Private (e.g. firstprivate) data-sharing
              // clause still reads the original value, so sharing is needed;
              // a plain private one never touches the original allocation.
              auto privateOp = cast<omp::PrivateClauseOp>(
                  moduleOp.lookupSymbol(cast<SymbolRefAttr>(sym)));
              return privateOp.getDataSharingType() !=
                     omp::DataSharingClauseType::Private;
            }
          }
        }
        return true;
      }
      return false;
    };

    // Check direct uses and also follow hlfir.declare uses, since the declare
    // op forwards the allocation to its own users.
    for (const OpOperand &use : uses) {
      if (auto declareOp = dyn_cast<hlfir::DeclareOp>(use.getOwner())) {
        if (checkAllocaUses(declareOp->getUses()))
          return true;
      } else if (checkUse(use)) {
        return true;
      }
    }

    return false;
  }
};
} // namespace

flang/lib/Optimizer/Passes/Pipelines.cpp

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -335,8 +335,10 @@ void createOpenMPFIRPassPipeline(mlir::PassManager &pm,
335335
pm.addPass(flangomp::createMapInfoFinalizationPass());
336336
pm.addPass(flangomp::createMarkDeclareTargetPass());
337337
pm.addPass(flangomp::createGenericLoopConversionPass());
338-
if (opts.isTargetDevice)
338+
if (opts.isTargetDevice) {
339+
pm.addPass(flangomp::createStackToSharedPass());
339340
pm.addPass(flangomp::createFunctionFilteringPass());
341+
}
340342
}
341343

342344
void createDebugPasses(mlir::PassManager &pm,

0 commit comments

Comments
 (0)